diff --git a/.ci/linux-build.sh b/.ci/linux-build.sh index 45f27299962..b09df07b552 100755 --- a/.ci/linux-build.sh +++ b/.ci/linux-build.sh @@ -92,12 +92,16 @@ fi OPTS="$OPTS -Dplatform=generic" OPTS="$OPTS -Ddefault_library=$DEF_LIB" OPTS="$OPTS -Dbuildtype=$buildtype" -OPTS="$OPTS -Dcheck_includes=true" +if [ "$STDATOMIC" = "true" ]; then + OPTS="$OPTS -Denable_stdatomic=true" +else + OPTS="$OPTS -Dcheck_includes=true" +fi if [ "$MINI" = "true" ]; then OPTS="$OPTS -Denable_drivers=net/null" OPTS="$OPTS -Ddisable_libs=*" else - OPTS="$OPTS -Ddisable_libs=" + OPTS="$OPTS -Denable_deprecated_libs=*" fi OPTS="$OPTS -Dlibdir=lib" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7a2ac0ceeee..272a6ffc7f3 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -30,6 +30,7 @@ jobs: REF_GIT_TAG: none RISCV64: ${{ matrix.config.cross == 'riscv64' }} RUN_TESTS: ${{ contains(matrix.config.checks, 'tests') }} + STDATOMIC: ${{ contains(matrix.config.checks, 'stdatomic') }} strategy: fail-fast: false @@ -38,6 +39,12 @@ jobs: - os: ubuntu-20.04 compiler: gcc mini: mini + - os: ubuntu-20.04 + compiler: gcc + checks: stdatomic + - os: ubuntu-20.04 + compiler: clang + checks: stdatomic - os: ubuntu-20.04 compiler: gcc checks: debug+doc+examples+tests @@ -125,7 +132,7 @@ jobs: run: sudo apt install -y gdb jq - name: Install doc generation packages if: env.BUILD_DOCS == 'true' - run: sudo apt install -y doxygen graphviz python3-sphinx + run: sudo apt install -y doxygen graphviz man-db python3-sphinx python3-sphinx-rtd-theme - name: Run setup run: | @@ -241,6 +248,7 @@ jobs: > ~/env echo CC=ccache ${{ matrix.config.compiler }} >> ~/env echo DEF_LIB=${{ matrix.config.library }} >> ~/env + echo STDATOMIC=false >> ~/env - name: Load the cached image run: | docker load -i ~/.image/${{ matrix.config.image }}.tar diff --git a/.mailmap b/.mailmap index 864d33ee46f..3f5bab26a81 100644 --- a/.mailmap +++ b/.mailmap @@ -30,6 +30,7 @@ Akeem G Abodunrin Akhil Goyal Akihiko Odaki Alain Leon +Alan Brady Alan Carew Alan Dewar Alan Liu @@ -98,6 +99,7 @@ Andrew Lee Andrew Pinski Andrew Rybchenko Andrey Chilikin +Andrey Ignatov Andrey Nikolaev Andrey Vesnovaty Andrii Pypchenko @@ -106,7 +108,7 @@ Andriy Berestovskyy Andrzej Ostruszka Andy Gospodarek Andy Green -Andy Moreton +Andy Moreton Andy Pei Anirudh Venkataramanan Ankur Dwivedi @@ -126,6 +128,7 @@ Arnaud Fiorini Arnon Warshavsky Arshdeep Kaur Artemii Morozov +Artemy Kovalyov Artem V. Andreev Artur Rojek Artur Trybula @@ -200,6 +203,7 @@ Carolyn Wyborny Chaeyong Chong Chaitanya Babu Talluri Chandubabu Namburu +Chang Miao Changchun Ouyang Changpeng Liu Changqing Wu @@ -213,7 +217,7 @@ Chenbo Xia Chengchang Tang Chengfeng Ye Chenghu Yao -Cheng Jiang +Cheng Jiang Chenglian Sun Cheng Liu Cheng Peng @@ -538,6 +542,7 @@ Ido Barnea Ido Goshen Ido Segev Igor Chauskin +Igor de Paula Igor Romanov Igor Russkikh Igor Ryzhov @@ -590,6 +595,7 @@ Jay Ding Jay Jayatheerthan Jay Rolette Jay Zhou +Jayaprakash Shanmugam Jean Dao Jean-Mickael Guerin Jean Tourrilhes @@ -623,7 +629,7 @@ Jiaqi Min Jiawei Wang Jiawei Zhu Jiawen Wu -Jiayu Hu +Jiayu Hu Jia Yu Jie Hai Jie Liu @@ -664,15 +670,17 @@ John McNamara John Miller John OLoughlin John Ousterhout +John Romein John W. 
Linville Jonas Pfefferle -Jonathan Erb +Jonathan Erb Jon DeVree Jon Loeliger Joongi Kim Jørgen Østergaard Sloth Jörg Thalheim Joseph Richard +Josh Hay Josh Soref Joshua Hay Joshua Washington @@ -682,6 +690,7 @@ JP Lee Juan Antonio Montesinos Juhamatti Kuusisaari Juho Snellman +Julian Grajkowski Julien Aube Julien Castets Julien Courtat @@ -747,6 +756,7 @@ Krzysztof Galazka Krzysztof Kanas Krzysztof Karas Krzysztof Witek +Kuan Xu Kuba Kozak Kumar Amber Kumara Parameshwaran @@ -824,6 +834,7 @@ Maciej Machnikowski Maciej Paczkowski Maciej Rabeda Maciej Szwed +Madhu Chittim Madhuker Mythri Mahipal Challa Mah Yock Gen @@ -1080,6 +1091,7 @@ Peng Yu Peng Zhang Pengzhen Liu Peter Mccarthy +Peter Nilsson Peter Spreadborough Petr Houska Phanendra Vukkisala @@ -1208,10 +1220,12 @@ Sagi Grimberg Saikrishna Edupuganti Saleh Alsouqi Salem Sol +Sam Andrew Sameh Gobriel Sam Grove Samik Gupta Samina Arshad +Sampath Peechu Samuel Gauthier Sandilya Bhagi Sangjin Han @@ -1227,6 +1241,7 @@ Satananda Burla Satha Rao Satheesh Paul Sathesh Edara +Saurabh Singhal Savinay Dharmappa Scott Branden Scott Daniels @@ -1252,6 +1267,7 @@ Shahaf Shuler Shahaji Bhosle Shahed Shaikh Shai Brandes +Shailendra Bhatnagar Shally Verma Shannon Nelson Shannon Zhao @@ -1265,6 +1281,7 @@ Shelton Chia Shepard Siegel Shesha Sreenivasamurthy Shibin Koikkara Reeny +Shihong Wang Shijith Thotton Shiqi Liu <835703180@qq.com> Shiri Kuzin @@ -1285,15 +1302,18 @@ Shu Shen Shweta Choudaha Shyam Kumar Shrivastav Shy Shyman +Sibaranjan Pattnayak Siddaraju DH Simei Su Simon Ellmann Simon Horman Simon Kagstrom Simon Kuenzer +Sinan Kaya Siobhan Butler Sirshak Das Sivaprasad Tummala +Sivaramakrishnan Venkat Siwar Zitouni Slawomir Mrozowicz Slawomir Rosek @@ -1411,6 +1431,7 @@ Tom Barbette Tom Crugnale Tom Millington Tom Rix +Tomer Shmilovich Tone Zhang Tonghao Zhang Tony Nguyen @@ -1481,6 +1502,7 @@ Wei Dai Weifeng Li Weiguo Li Wei Huang +Wei Hu Wei Hu (Xavier) WeiJie Zhuang Weiliang Luo @@ -1520,6 +1542,7 @@ Xiaofeng Liu Xiaohua Zhang Xiao Liang Xiaolong Ye +Xiaoming Jiang Xiaonan Zhang Xiao Wang Xiaoxiao Zeng @@ -1618,6 +1641,7 @@ Zhangkun Zhaochen Zhan Zhaoyan Chen Zhenghua Zhou +Zhenning Xiao Zhe Tao Zhichao Zeng Zhigang Lu diff --git a/MAINTAINERS b/MAINTAINERS index 8c3f2c993f3..4083658697f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -155,6 +155,7 @@ F: doc/guides/prog_guide/env_abstraction_layer.rst F: app/test/test_alarm.c F: app/test/test_atomic.c F: app/test/test_barrier.c +F: app/test/test_bitcount.c F: app/test/test_byteorder.c F: app/test/test_common.c F: app/test/test_cpuflags.c @@ -320,7 +321,6 @@ M: Bruce Richardson M: Konstantin Ananyev F: config/x86/ F: doc/guides/linux_gsg/nic_perf_intel_platform.rst -F: buildtools/binutils-avx512-check.py F: doc/guides/howto/avx512.rst F: lib/eal/x86/ F: lib/*/*_sse* @@ -378,7 +378,6 @@ Core Libraries T: git://dpdk.org/dpdk Memory pool -M: Olivier Matz M: Andrew Rybchenko F: lib/mempool/ F: drivers/mempool/ring/ @@ -395,14 +394,12 @@ F: app/test/test_ring* F: app/test/test_func_reentrancy.c Stack -M: Olivier Matz F: lib/stack/ F: drivers/mempool/stack/ F: app/test/test_stack* F: doc/guides/prog_guide/stack_lib.rst Packet buffer -M: Olivier Matz F: lib/mbuf/ F: doc/guides/prog_guide/mbuf_lib.rst F: app/test/test_mbuf.c @@ -540,6 +537,13 @@ F: lib/eventdev/*crypto_adapter* F: app/test/test_event_crypto_adapter.c F: doc/guides/prog_guide/event_crypto_adapter.rst +Eventdev DMA Adapter API +M: Amit Prakash Shukla +T: git://dpdk.org/next/dpdk-next-eventdev +F: lib/eventdev/*dma_adapter* +F: 
app/test/test_event_dma_adapter.c +F: doc/guides/prog_guide/event_dma_adapter.rst + Raw device API M: Sachin Saxena M: Hemant Agrawal @@ -609,8 +613,8 @@ F: drivers/bus/vmbus/ Networking Drivers ------------------ -M: Ferruh Yigit T: git://dpdk.org/next/dpdk-next-net +F: drivers/net/meson.build F: doc/guides/nics/features/default.ini Link bonding @@ -644,7 +648,7 @@ F: doc/guides/nics/ena.rst F: doc/guides/nics/features/ena.ini AMD axgbe -M: Chandubabu Namburu +M: Selwin Sebastian F: drivers/net/axgbe/ F: doc/guides/nics/axgbe.rst F: doc/guides/nics/features/axgbe.ini @@ -718,7 +722,7 @@ F: doc/guides/nics/gve.rst F: doc/guides/nics/features/gve.ini Hisilicon hns3 -M: Dongdong Liu +M: Jie Hai M: Yisen Zhuang F: drivers/net/hns3/ F: doc/guides/nics/hns3.rst @@ -896,7 +900,6 @@ F: doc/guides/nics/features/nfb.ini Netronome nfp M: Chaoyong He -M: Niklas Soderlund F: drivers/net/nfp/ F: doc/guides/nics/nfp.rst F: doc/guides/nics/features/nfp*.ini @@ -1057,6 +1060,7 @@ F: doc/guides/nics/features/memif.ini Crypto Drivers -------------- T: git://dpdk.org/next/dpdk-next-crypto +F: drivers/crypto/meson.build F: doc/guides/cryptodevs/features/default.ini AMD CCP Crypto @@ -1193,6 +1197,7 @@ F: doc/guides/cryptodevs/features/virtio.ini Compression Drivers ------------------- T: git://dpdk.org/next/dpdk-next-crypto +F: drivers/compress/meson.build Cavium OCTEON TX zipvf M: Ashish Gupta @@ -1244,6 +1249,7 @@ F: doc/guides/dmadevs/hisilicon.rst Marvell CNXK DPI DMA M: Vamsi Attunuru +T: git://dpdk.org/next/dpdk-next-net-mrvl F: drivers/dma/cnxk/ F: doc/guides/dmadevs/cnxk.rst @@ -1265,6 +1271,7 @@ RegEx Drivers Marvell OCTEON CN9K regex M: Liron Himi +T: git://dpdk.org/next/dpdk-next-net-mrvl F: drivers/regex/cn9k/ F: doc/guides/regexdevs/cn9k.rst F: doc/guides/regexdevs/features/cn9k.ini @@ -1281,6 +1288,7 @@ MLdev Drivers Marvell ML CNXK M: Srikanth Yalavarthi +T: git://dpdk.org/next/dpdk-next-net-mrvl F: drivers/common/cnxk/hw/ml.h F: drivers/common/cnxk/roc_ml* F: drivers/ml/cnxk/ @@ -1290,6 +1298,7 @@ F: doc/guides/mldevs/cnxk.rst vDPA Drivers ------------ T: git://dpdk.org/next/dpdk-next-virtio +F: drivers/vdpa/meson.build Intel ifc M: Xiao Wang @@ -1313,8 +1322,8 @@ F: doc/guides/vdpadevs/features/sfc.ini Eventdev Drivers ---------------- -M: Jerin Jacob T: git://dpdk.org/next/dpdk-next-eventdev +F: drivers/event/meson.build Cavium OCTEON TX ssovf M: Jerin Jacob @@ -1326,7 +1335,7 @@ M: Pavan Nikhilesh F: drivers/event/octeontx/timvf_* Intel DLB2 -M: Timothy McDaniel +M: Abdullah Sevincer F: drivers/event/dlb2/ F: doc/guides/eventdevs/dlb2.rst @@ -1369,10 +1378,11 @@ F: doc/guides/eventdevs/opdl.rst Baseband Drivers ---------------- +T: git://dpdk.org/next/dpdk-next-baseband +F: drivers/baseband/meson.build Intel baseband M: Nicolas Chautru -T: git://dpdk.org/next/dpdk-next-baseband F: drivers/baseband/turbo_sw/ F: doc/guides/bbdevs/turbo_sw.rst F: doc/guides/bbdevs/features/turbo_sw.ini @@ -1391,7 +1401,6 @@ F: doc/guides/bbdevs/features/vrb1.ini Null baseband M: Nicolas Chautru -T: git://dpdk.org/next/dpdk-next-baseband F: drivers/baseband/null/ F: doc/guides/bbdevs/null.rst F: doc/guides/bbdevs/features/null.ini @@ -1399,7 +1408,6 @@ F: doc/guides/bbdevs/features/null.ini NXP LA12xx M: Gagandeep Singh M: Hemant Agrawal -T: git://dpdk.org/next/dpdk-next-baseband F: drivers/baseband/la12xx/ F: doc/guides/bbdevs/la12xx.rst F: doc/guides/bbdevs/features/la12xx.ini @@ -1427,12 +1435,14 @@ F: doc/guides/rawdevs/ifpga.rst Marvell CNXK BPHY M: Jakub Palider M: Tomasz Duszynski +T: 
git://dpdk.org/next/dpdk-next-net-mrvl F: doc/guides/rawdevs/cnxk_bphy.rst F: drivers/raw/cnxk_bphy/ Marvell CNXK GPIO M: Jakub Palider M: Tomasz Duszynski +T: git://dpdk.org/next/dpdk-next-net-mrvl F: doc/guides/rawdevs/cnxk_gpio.rst F: drivers/raw/cnxk_gpio/ @@ -1454,10 +1464,10 @@ Packet processing ----------------- Network headers -M: Olivier Matz F: lib/net/ F: app/test/test_cksum.c F: app/test/test_cksum_perf.c +F: app/test/test_net_ether.c Packet CRC M: Jasvinder Singh @@ -1479,12 +1489,12 @@ F: examples/ip_reassembly/ F: doc/guides/sample_app_ug/ip_reassembly.rst Generic Receive Offload - EXPERIMENTAL -M: Jiayu Hu +M: Jiayu Hu F: lib/gro/ F: doc/guides/prog_guide/generic_receive_offload_lib.rst Generic Segmentation Offload -M: Jiayu Hu +M: Jiayu Hu F: lib/gso/ F: doc/guides/prog_guide/generic_segmentation_offload_lib.rst @@ -1634,7 +1644,6 @@ F: app/test/test_cfgfile.c F: app/test/test_cfgfiles/ Interactive command line -M: Olivier Matz F: lib/cmdline/ F: app/test-cmdline/ F: app/test/test_cmdline* @@ -1642,7 +1651,6 @@ F: examples/cmdline/ F: doc/guides/sample_app_ug/cmd_line.rst Key/Value parsing -M: Olivier Matz F: lib/kvargs/ F: app/test/test_kvargs.c @@ -1676,6 +1684,12 @@ F: app/test/test_timer* F: examples/timer/ F: doc/guides/sample_app_ug/timer.rst +Dispatcher - EXPERIMENTAL +M: Mattias Rönnblom +F: lib/dispatcher/ +F: app/test/test_dispatcher.c +F: doc/guides/prog_guide/dispatcher_lib.rst + Job statistics F: lib/jobstats/ F: examples/l2fwd-jobstats/ @@ -1698,6 +1712,7 @@ Telemetry M: Ciara Power F: lib/telemetry/ F: app/test/test_telemetry* +F: app/test/suites/test_telemetry.sh F: usertools/dpdk-telemetry* F: doc/guides/howto/telemetry.rst @@ -1731,7 +1746,6 @@ Test Applications Unit tests framework F: app/test/commands.c -F: app/test/has_hugepage.py F: app/test/packet_burst_generator.c F: app/test/packet_burst_generator.h F: app/test/process.h @@ -1742,6 +1756,8 @@ F: app/test/test_pmd_perf.c F: app/test/test_resource.c F: app/test/virtual_pmd.c F: app/test/virtual_pmd.h +F: buildtools/has-hugepages.py +F: buildtools/get-test-suites.py Sample packet helper functions for unit test M: Reshma Pattan @@ -1756,7 +1772,7 @@ F: app/test-pmd/ F: doc/guides/testpmd_app_ug/ DMA device performance tool -M: Cheng Jiang +M: Cheng Jiang F: app/test-dma-perf/ F: doc/guides/tools/dmaperf.rst diff --git a/VERSION b/VERSION index 1d4e4e7927f..85415509edd 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -23.11.0-rc0 +23.11.0-rc1 diff --git a/app/meson.build b/app/meson.build index 4fc1a83ebad..e4bf5c531c4 100644 --- a/app/meson.build +++ b/app/meson.build @@ -1,6 +1,10 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017-2019 Intel Corporation +if is_ms_compiler + subdir_done() +endif + disable_apps = ',' + get_option('disable_apps') disable_apps = run_command(list_dir_globs, disable_apps, check: true).stdout().split() @@ -32,14 +36,17 @@ apps = [ 'test-security-perf', ] +if get_option('tests') +# build the auto test app if enabled. 
+ apps += 'test' +endif + default_cflags = machine_args + ['-DALLOW_EXPERIMENTAL_API'] default_ldflags = [] if get_option('default_library') == 'static' and not is_windows default_ldflags += ['-Wl,--export-dynamic'] endif -enabled_apps = [] # used to print summary at the end - foreach app:apps name = app build = true @@ -90,13 +97,13 @@ foreach app:apps continue endif - enabled_apps += app + dpdk_apps_enabled += app link_libs = [] if get_option('default_library') == 'static' link_libs = dpdk_static_libraries + dpdk_drivers endif - executable('dpdk-' + name, + exec = executable('dpdk-' + name, sources, c_args: cflags, link_args: ldflags, @@ -105,7 +112,9 @@ foreach app:apps include_directories: includes, install_rpath: join_paths(get_option('prefix'), driver_install_path), install: true) + if name == 'test' + dpdk_test = exec + autotest_sources = sources + subdir('test/suites') # define the pre-canned test suites + endif endforeach - -# special case the autotests -subdir('test') diff --git a/app/proc-info/main.c b/app/proc-info/main.c index 88cee0ca487..ce53bc30dfe 100644 --- a/app/proc-info/main.c +++ b/app/proc-info/main.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -909,24 +908,23 @@ metrics_display(int port_id) return; } - metrics = rte_malloc("proc_info_metrics", - sizeof(struct rte_metric_value) * len, 0); + metrics = malloc(sizeof(struct rte_metric_value) * len); if (metrics == NULL) { printf("Cannot allocate memory for metrics\n"); return; } - names = rte_malloc(NULL, sizeof(struct rte_metric_name) * len, 0); + names = malloc(sizeof(struct rte_metric_name) * len); if (names == NULL) { printf("Cannot allocate memory for metrics names\n"); - rte_free(metrics); + free(metrics); return; } if (len != rte_metrics_get_names(names, len)) { printf("Cannot get metrics names\n"); - rte_free(metrics); - rte_free(names); + free(metrics); + free(names); return; } @@ -938,8 +936,8 @@ metrics_display(int port_id) ret = rte_metrics_get_values(port_id, metrics, len); if (ret < 0 || ret > len) { printf("Cannot get metrics values\n"); - rte_free(metrics); - rte_free(names); + free(metrics); + free(names); return; } @@ -948,8 +946,8 @@ metrics_display(int port_id) printf("%s: %"PRIu64"\n", names[i].name, metrics[i].value); printf("%s############################\n", nic_stats_border); - rte_free(metrics); - rte_free(names); + free(metrics); + free(names); } #endif @@ -990,7 +988,7 @@ show_offloads(uint64_t offloads, { printf(" offloads :"); while (offloads != 0) { - uint64_t offload_flag = 1ULL << __builtin_ctzll(offloads); + uint64_t offload_flag = 1ULL << rte_ctz64(offloads); printf(" %s", show_offload(offload_flag)); offloads &= ~offload_flag; } diff --git a/app/test-bbdev/meson.build b/app/test-bbdev/meson.build index cd6a5089d54..926e0a52712 100644 --- a/app/test-bbdev/meson.build +++ b/app/test-bbdev/meson.build @@ -23,6 +23,6 @@ endif if dpdk_conf.has('RTE_BASEBAND_ACC') deps += ['baseband_acc'] endif -if dpdk_conf.has('RTE_LIBRTE_PMD_BBDEV_LA12XX') +if dpdk_conf.has('RTE_BASEBAND_LA12XX') deps += ['baseband_la12xx'] endif diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c index 93b9bfb240e..84945d1313b 100644 --- a/app/test-crypto-perf/cperf_ops.c +++ b/app/test-crypto-perf/cperf_ops.c @@ -749,8 +749,7 @@ create_ipsec_session(struct rte_mempool *sess_mp, else sess_conf.ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS; - struct rte_security_ctx *ctx = (struct rte_security_ctx *) - rte_cryptodev_get_sec_ctx(dev_id); + void 
*ctx = rte_cryptodev_get_sec_ctx(dev_id); /* Create security session */ return (void *)rte_security_session_create(ctx, &sess_conf, sess_mp); @@ -853,8 +852,7 @@ cperf_create_session(struct rte_mempool *sess_mp, .crypto_xform = &cipher_xform }; - struct rte_security_ctx *ctx = (struct rte_security_ctx *) - rte_cryptodev_get_sec_ctx(dev_id); + void *ctx = rte_cryptodev_get_sec_ctx(dev_id); /* Create security session */ return (void *)rte_security_session_create(ctx, &sess_conf, sess_mp); @@ -901,8 +899,7 @@ cperf_create_session(struct rte_mempool *sess_mp, } }, .crypto_xform = &cipher_xform }; - struct rte_security_ctx *ctx = (struct rte_security_ctx *) - rte_cryptodev_get_sec_ctx(dev_id); + void *ctx = rte_cryptodev_get_sec_ctx(dev_id); /* Create security session */ return (void *)rte_security_session_create(ctx, &sess_conf, sess_mp); diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c index f1676a9aa94..484bc9eb4e1 100644 --- a/app/test-crypto-perf/cperf_test_latency.c +++ b/app/test-crypto-perf/cperf_test_latency.c @@ -53,8 +53,7 @@ cperf_latency_test_free(struct cperf_latency_ctx *ctx) else if (ctx->options->op_type == CPERF_PDCP || ctx->options->op_type == CPERF_DOCSIS || ctx->options->op_type == CPERF_IPSEC) { - struct rte_security_ctx *sec_ctx = - rte_cryptodev_get_sec_ctx(ctx->dev_id); + void *sec_ctx = rte_cryptodev_get_sec_ctx(ctx->dev_id); rte_security_session_destroy(sec_ctx, ctx->sess); } #endif diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c index 0307e829969..4a60f6d558c 100644 --- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c +++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c @@ -67,11 +67,9 @@ cperf_pmd_cyclecount_test_free(struct cperf_pmd_cyclecount_ctx *ctx) #ifdef RTE_LIB_SECURITY if (ctx->options->op_type == CPERF_PDCP || ctx->options->op_type == CPERF_DOCSIS) { - struct rte_security_ctx *sec_ctx = - (struct rte_security_ctx *) - rte_cryptodev_get_sec_ctx(ctx->dev_id); - rte_security_session_destroy(sec_ctx, - (void *)ctx->sess); + void *sec_ctx = rte_cryptodev_get_sec_ctx(ctx->dev_id); + + rte_security_session_destroy(sec_ctx, (void *)ctx->sess); } else #endif rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess); diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c index e892a70699e..f8f8bd717f0 100644 --- a/app/test-crypto-perf/cperf_test_throughput.c +++ b/app/test-crypto-perf/cperf_test_throughput.c @@ -44,12 +44,9 @@ cperf_throughput_test_free(struct cperf_throughput_ctx *ctx) else if (ctx->options->op_type == CPERF_PDCP || ctx->options->op_type == CPERF_DOCSIS || ctx->options->op_type == CPERF_IPSEC) { - struct rte_security_ctx *sec_ctx = - (struct rte_security_ctx *) - rte_cryptodev_get_sec_ctx(ctx->dev_id); - rte_security_session_destroy( - sec_ctx, - (void *)ctx->sess); + void *sec_ctx = rte_cryptodev_get_sec_ctx(ctx->dev_id); + + rte_security_session_destroy(sec_ctx, (void *)ctx->sess); } #endif else diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c index 8042c94e04b..a6c0ffe813c 100644 --- a/app/test-crypto-perf/cperf_test_verify.c +++ b/app/test-crypto-perf/cperf_test_verify.c @@ -48,8 +48,8 @@ cperf_verify_test_free(struct cperf_verify_ctx *ctx) else if (ctx->options->op_type == CPERF_PDCP || ctx->options->op_type == CPERF_DOCSIS || ctx->options->op_type == CPERF_IPSEC) { - struct rte_security_ctx *sec_ctx = - 
rte_cryptodev_get_sec_ctx(ctx->dev_id); + void *sec_ctx = rte_cryptodev_get_sec_ctx(ctx->dev_id); + rte_security_session_destroy(sec_ctx, ctx->sess); } #endif diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c index bc1f0f96597..6a2e5762a3d 100644 --- a/app/test-crypto-perf/main.c +++ b/app/test-crypto-perf/main.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #ifdef RTE_CRYPTO_SCHEDULER #include @@ -560,6 +561,7 @@ main(int argc, char **argv) int ret; uint32_t lcore_id; + bool cap_unsupported = false; /* Initialise DPDK EAL */ ret = rte_eal_init(argc, argv); @@ -600,6 +602,7 @@ main(int argc, char **argv) if (ret) { RTE_LOG(ERR, USER1, "Crypto device type does not support " "capabilities requested\n"); + cap_unsupported = true; goto err; } @@ -819,6 +822,10 @@ main(int argc, char **argv) rte_free(opts.imix_buffer_sizes); free_test_vector(t_vec, &opts); + if (rte_errno == ENOTSUP || cap_unsupported) { + RTE_LOG(ERR, USER1, "Unsupported case: errno: %u\n", rte_errno); + return -ENOTSUP; + } printf("\n"); return EXIT_FAILURE; } diff --git a/app/test-fib/main.c b/app/test-fib/main.c index eafd4e2be05..75a56135f21 100644 --- a/app/test-fib/main.c +++ b/app/test-fib/main.c @@ -864,7 +864,7 @@ run_v4(void) conf.max_routes = config.nb_routes * 2; conf.rib_ext_sz = 0; if (conf.type == RTE_FIB_DIR24_8) { - conf.dir24_8.nh_sz = __builtin_ctz(config.ent_sz); + conf.dir24_8.nh_sz = rte_ctz32(config.ent_sz); conf.dir24_8.num_tbl8 = RTE_MIN(config.tbl8, get_max_nh(conf.dir24_8.nh_sz)); } @@ -1065,7 +1065,7 @@ run_v6(void) conf.max_routes = config.nb_routes * 2; conf.rib_ext_sz = 0; if (conf.type == RTE_FIB6_TRIE) { - conf.trie.nh_sz = __builtin_ctz(config.ent_sz); + conf.trie.nh_sz = rte_ctz32(config.ent_sz); conf.trie.num_tbl8 = RTE_MIN(config.tbl8, get_max_nh(conf.trie.nh_sz)); } @@ -1293,12 +1293,12 @@ main(int argc, char **argv) "Bad routes distribution configuration\n"); if (af == AF_INET) { gen_random_rt_4(config.rt, - __builtin_ctz(config.ent_sz)); + rte_ctz32(config.ent_sz)); if (config.flags & SHUFFLE_FLAG) shuffle_rt_4(config.rt, config.nb_routes); } else { gen_random_rt_6(config.rt, - __builtin_ctz(config.ent_sz)); + rte_ctz32(config.ent_sz)); if (config.flags & SHUFFLE_FLAG) shuffle_rt_6(config.rt, config.nb_routes); } diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c index bed06d1bf7d..72357c1c393 100644 --- a/app/test-mldev/ml_options.c +++ b/app/test-mldev/ml_options.c @@ -28,7 +28,6 @@ ml_options_default(struct ml_options *opt) opt->burst_size = 1; opt->queue_pairs = 1; opt->queue_size = 1; - opt->batches = 0; opt->tolerance = 0.0; opt->stats = false; opt->debug = false; @@ -212,18 +211,6 @@ ml_parse_queue_size(struct ml_options *opt, const char *arg) return ret; } -static int -ml_parse_batches(struct ml_options *opt, const char *arg) -{ - int ret; - - ret = parser_read_uint16(&opt->batches, arg); - if (ret != 0) - ml_err("Invalid option, batches = %s\n", arg); - - return ret; -} - static int ml_parse_tolerance(struct ml_options *opt, const char *arg) { @@ -254,7 +241,6 @@ ml_dump_test_options(const char *testname) "\t\t--burst_size : inference burst size\n" "\t\t--queue_pairs : number of queue pairs to create\n" "\t\t--queue_size : size of queue-pair\n" - "\t\t--batches : number of batches of input\n" "\t\t--tolerance : maximum tolerance (%%) for output validation\n" "\t\t--stats : enable reporting device and model statistics\n"); printf("\n"); @@ -286,7 +272,6 @@ static struct option lgopts[] = { {ML_BURST_SIZE, 1, 0, 0}, 
{ML_QUEUE_PAIRS, 1, 0, 0}, {ML_QUEUE_SIZE, 1, 0, 0}, - {ML_BATCHES, 1, 0, 0}, {ML_TOLERANCE, 1, 0, 0}, {ML_STATS, 0, 0, 0}, {ML_DEBUG, 0, 0, 0}, @@ -308,7 +293,6 @@ ml_opts_parse_long(int opt_idx, struct ml_options *opt) {ML_BURST_SIZE, ml_parse_burst_size}, {ML_QUEUE_PAIRS, ml_parse_queue_pairs}, {ML_QUEUE_SIZE, ml_parse_queue_size}, - {ML_BATCHES, ml_parse_batches}, {ML_TOLERANCE, ml_parse_tolerance}, }; diff --git a/app/test-mldev/ml_options.h b/app/test-mldev/ml_options.h index 622a4c05fc2..90e22adeac1 100644 --- a/app/test-mldev/ml_options.h +++ b/app/test-mldev/ml_options.h @@ -21,7 +21,6 @@ #define ML_BURST_SIZE ("burst_size") #define ML_QUEUE_PAIRS ("queue_pairs") #define ML_QUEUE_SIZE ("queue_size") -#define ML_BATCHES ("batches") #define ML_TOLERANCE ("tolerance") #define ML_STATS ("stats") #define ML_DEBUG ("debug") @@ -44,7 +43,6 @@ struct ml_options { uint16_t burst_size; uint16_t queue_pairs; uint16_t queue_size; - uint16_t batches; float tolerance; bool stats; bool debug; diff --git a/app/test-mldev/test_inference_common.c b/app/test-mldev/test_inference_common.c index 418bf38be4c..7a4f2b68dcf 100644 --- a/app/test-mldev/test_inference_common.c +++ b/app/test-mldev/test_inference_common.c @@ -3,6 +3,7 @@ */ #include +#include #include #include @@ -18,11 +19,6 @@ #include "ml_common.h" #include "test_inference_common.h" -#define ML_TEST_READ_TYPE(buffer, type) (*((type *)buffer)) - -#define ML_TEST_CHECK_OUTPUT(output, reference, tolerance) \ - (((float)output - (float)reference) <= (((float)reference * tolerance) / 100.0)) - #define ML_OPEN_WRITE_GET_ERR(name, buffer, size, err) \ do { \ FILE *fp = fopen(name, "w+"); \ @@ -51,7 +47,10 @@ ml_enqueue_single(void *arg) uint64_t start_cycle; uint32_t burst_enq; uint32_t lcore_id; + uint64_t offset; + uint64_t bufsz; uint16_t fid; + uint32_t i; int ret; lcore_id = rte_lcore_id(); @@ -70,24 +69,64 @@ ml_enqueue_single(void *arg) if (ret != 0) goto next_model; -retry: +retry_req: ret = rte_mempool_get(t->model[fid].io_pool, (void **)&req); if (ret != 0) - goto retry; + goto retry_req; + +retry_inp_segs: + ret = rte_mempool_get_bulk(t->buf_seg_pool, (void **)req->inp_buf_segs, + t->model[fid].info.nb_inputs); + if (ret != 0) + goto retry_inp_segs; + +retry_out_segs: + ret = rte_mempool_get_bulk(t->buf_seg_pool, (void **)req->out_buf_segs, + t->model[fid].info.nb_outputs); + if (ret != 0) + goto retry_out_segs; op->model_id = t->model[fid].id; - op->nb_batches = t->model[fid].nb_batches; + op->nb_batches = t->model[fid].info.min_batches; op->mempool = t->op_pool; + op->input = req->inp_buf_segs; + op->output = req->out_buf_segs; + op->user_ptr = req; - op->input.addr = req->input; - op->input.length = t->model[fid].inp_qsize; - op->input.next = NULL; + if (t->model[fid].info.io_layout == RTE_ML_IO_LAYOUT_PACKED) { + op->input[0]->addr = req->input; + op->input[0]->iova_addr = rte_mem_virt2iova(req->input); + op->input[0]->length = t->model[fid].inp_qsize; + op->input[0]->next = NULL; - op->output.addr = req->output; - op->output.length = t->model[fid].out_qsize; - op->output.next = NULL; + op->output[0]->addr = req->output; + op->output[0]->iova_addr = rte_mem_virt2iova(req->output); + op->output[0]->length = t->model[fid].out_qsize; + op->output[0]->next = NULL; + } else { + offset = 0; + for (i = 0; i < t->model[fid].info.nb_inputs; i++) { + bufsz = RTE_ALIGN_CEIL(t->model[fid].info.input_info[i].size, + t->cmn.dev_info.align_size); + op->input[i]->addr = req->input + offset; + op->input[i]->iova_addr = rte_mem_virt2iova(req->input 
+ offset); + op->input[i]->length = bufsz; + op->input[i]->next = NULL; + offset += bufsz; + } + + offset = 0; + for (i = 0; i < t->model[fid].info.nb_outputs; i++) { + bufsz = RTE_ALIGN_CEIL(t->model[fid].info.output_info[i].size, + t->cmn.dev_info.align_size); + op->output[i]->addr = req->output + offset; + op->output[i]->iova_addr = rte_mem_virt2iova(req->output + offset); + op->output[i]->length = bufsz; + op->output[i]->next = NULL; + offset += bufsz; + } + } - op->user_ptr = req; req->niters++; req->fid = fid; @@ -147,6 +186,10 @@ ml_dequeue_single(void *arg) } req = (struct ml_request *)op->user_ptr; rte_mempool_put(t->model[req->fid].io_pool, req); + rte_mempool_put_bulk(t->buf_seg_pool, (void **)op->input, + t->model[req->fid].info.nb_inputs); + rte_mempool_put_bulk(t->buf_seg_pool, (void **)op->output, + t->model[req->fid].info.nb_outputs); rte_mempool_put(t->op_pool, op); } @@ -168,9 +211,12 @@ ml_enqueue_burst(void *arg) uint16_t burst_enq; uint32_t lcore_id; uint16_t pending; + uint64_t offset; + uint64_t bufsz; uint16_t idx; uint16_t fid; uint16_t i; + uint16_t j; int ret; lcore_id = rte_lcore_id(); @@ -190,25 +236,70 @@ ml_enqueue_burst(void *arg) if (ret != 0) goto next_model; -retry: +retry_reqs: ret = rte_mempool_get_bulk(t->model[fid].io_pool, (void **)args->reqs, ops_count); if (ret != 0) - goto retry; + goto retry_reqs; for (i = 0; i < ops_count; i++) { +retry_inp_segs: + ret = rte_mempool_get_bulk(t->buf_seg_pool, (void **)args->reqs[i]->inp_buf_segs, + t->model[fid].info.nb_inputs); + if (ret != 0) + goto retry_inp_segs; + +retry_out_segs: + ret = rte_mempool_get_bulk(t->buf_seg_pool, (void **)args->reqs[i]->out_buf_segs, + t->model[fid].info.nb_outputs); + if (ret != 0) + goto retry_out_segs; + args->enq_ops[i]->model_id = t->model[fid].id; - args->enq_ops[i]->nb_batches = t->model[fid].nb_batches; + args->enq_ops[i]->nb_batches = t->model[fid].info.min_batches; args->enq_ops[i]->mempool = t->op_pool; + args->enq_ops[i]->input = args->reqs[i]->inp_buf_segs; + args->enq_ops[i]->output = args->reqs[i]->out_buf_segs; + args->enq_ops[i]->user_ptr = args->reqs[i]; - args->enq_ops[i]->input.addr = args->reqs[i]->input; - args->enq_ops[i]->input.length = t->model[fid].inp_qsize; - args->enq_ops[i]->input.next = NULL; + if (t->model[fid].info.io_layout == RTE_ML_IO_LAYOUT_PACKED) { + args->enq_ops[i]->input[0]->addr = args->reqs[i]->input; + args->enq_ops[i]->input[0]->iova_addr = + rte_mem_virt2iova(args->reqs[i]->input); + args->enq_ops[i]->input[0]->length = t->model[fid].inp_qsize; + args->enq_ops[i]->input[0]->next = NULL; + + args->enq_ops[i]->output[0]->addr = args->reqs[i]->output; + args->enq_ops[i]->output[0]->iova_addr = + rte_mem_virt2iova(args->reqs[i]->output); + args->enq_ops[i]->output[0]->length = t->model[fid].out_qsize; + args->enq_ops[i]->output[0]->next = NULL; + } else { + offset = 0; + for (j = 0; j < t->model[fid].info.nb_inputs; j++) { + bufsz = RTE_ALIGN_CEIL(t->model[fid].info.input_info[i].size, + t->cmn.dev_info.align_size); + + args->enq_ops[i]->input[j]->addr = args->reqs[i]->input + offset; + args->enq_ops[i]->input[j]->iova_addr = + rte_mem_virt2iova(args->reqs[i]->input + offset); + args->enq_ops[i]->input[j]->length = t->model[fid].inp_qsize; + args->enq_ops[i]->input[j]->next = NULL; + offset += bufsz; + } - args->enq_ops[i]->output.addr = args->reqs[i]->output; - args->enq_ops[i]->output.length = t->model[fid].out_qsize; - args->enq_ops[i]->output.next = NULL; + offset = 0; + for (j = 0; j < t->model[fid].info.nb_outputs; j++) { + bufsz 
= RTE_ALIGN_CEIL(t->model[fid].info.output_info[i].size, + t->cmn.dev_info.align_size); + args->enq_ops[i]->output[j]->addr = args->reqs[i]->output + offset; + args->enq_ops[i]->output[j]->iova_addr = + rte_mem_virt2iova(args->reqs[i]->output + offset); + args->enq_ops[i]->output[j]->length = t->model[fid].out_qsize; + args->enq_ops[i]->output[j]->next = NULL; + offset += bufsz; + } + } - args->enq_ops[i]->user_ptr = args->reqs[i]; args->reqs[i]->niters++; args->reqs[i]->fid = fid; } @@ -279,8 +370,15 @@ ml_dequeue_burst(void *arg) t->error_count[lcore_id]++; } req = (struct ml_request *)args->deq_ops[i]->user_ptr; - if (req != NULL) + if (req != NULL) { rte_mempool_put(t->model[req->fid].io_pool, req); + rte_mempool_put_bulk(t->buf_seg_pool, + (void **)args->deq_ops[i]->input, + t->model[req->fid].info.nb_inputs); + rte_mempool_put_bulk(t->buf_seg_pool, + (void **)args->deq_ops[i]->output, + t->model[req->fid].info.nb_outputs); + } } rte_mempool_put_bulk(t->op_pool, (void *)args->deq_ops, burst_deq); } @@ -319,6 +417,12 @@ test_inference_cap_check(struct ml_options *opt) return false; } + if (dev_info.max_io < ML_TEST_MAX_IO_SIZE) { + ml_err("Insufficient capabilities: Max I/O, count = %u > (max limit = %u)", + ML_TEST_MAX_IO_SIZE, dev_info.max_io); + return false; + } + return true; } @@ -407,11 +511,6 @@ test_inference_opt_dump(struct ml_options *opt) ml_dump("tolerance", "%-7.3f", opt->tolerance); ml_dump("stats", "%s", (opt->stats ? "true" : "false")); - if (opt->batches == 0) - ml_dump("batches", "%u (default batch size)", opt->batches); - else - ml_dump("batches", "%u", opt->batches); - ml_dump_begin("filelist"); for (i = 0; i < opt->nb_filelist; i++) { ml_dump_list("model", i, opt->filelist[i].model); @@ -496,10 +595,18 @@ void test_inference_destroy(struct ml_test *test, struct ml_options *opt) { struct test_inference *t; + uint32_t lcore_id; RTE_SET_USED(opt); t = ml_test_priv(test); + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + rte_free(t->args[lcore_id].enq_ops); + rte_free(t->args[lcore_id].deq_ops); + rte_free(t->args[lcore_id].reqs); + } + rte_free(t); } @@ -576,19 +683,62 @@ ml_request_initialize(struct rte_mempool *mp, void *opaque, void *obj, unsigned { struct test_inference *t = ml_test_priv((struct ml_test *)opaque); struct ml_request *req = (struct ml_request *)obj; + struct rte_ml_buff_seg dbuff_seg[ML_TEST_MAX_IO_SIZE]; + struct rte_ml_buff_seg qbuff_seg[ML_TEST_MAX_IO_SIZE]; + struct rte_ml_buff_seg *q_segs[ML_TEST_MAX_IO_SIZE]; + struct rte_ml_buff_seg *d_segs[ML_TEST_MAX_IO_SIZE]; + uint64_t offset; + uint64_t bufsz; + uint32_t i; RTE_SET_USED(mp); RTE_SET_USED(obj_idx); req->input = (uint8_t *)obj + - RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.min_align_size); - req->output = req->input + - RTE_ALIGN_CEIL(t->model[t->fid].inp_qsize, t->cmn.dev_info.min_align_size); + RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.align_size); + req->output = + req->input + RTE_ALIGN_CEIL(t->model[t->fid].inp_qsize, t->cmn.dev_info.align_size); req->niters = 0; + if (t->model[t->fid].info.io_layout == RTE_ML_IO_LAYOUT_PACKED) { + dbuff_seg[0].addr = t->model[t->fid].input; + dbuff_seg[0].iova_addr = rte_mem_virt2iova(t->model[t->fid].input); + dbuff_seg[0].length = t->model[t->fid].inp_dsize; + dbuff_seg[0].next = NULL; + d_segs[0] = &dbuff_seg[0]; + + qbuff_seg[0].addr = req->input; + qbuff_seg[0].iova_addr = rte_mem_virt2iova(req->input); + qbuff_seg[0].length = t->model[t->fid].inp_qsize; + qbuff_seg[0].next = NULL; + q_segs[0] = 
&qbuff_seg[0]; + } else { + offset = 0; + for (i = 0; i < t->model[t->fid].info.nb_inputs; i++) { + bufsz = t->model[t->fid].info.input_info[i].nb_elements * sizeof(float); + dbuff_seg[i].addr = t->model[t->fid].input + offset; + dbuff_seg[i].iova_addr = rte_mem_virt2iova(t->model[t->fid].input + offset); + dbuff_seg[i].length = bufsz; + dbuff_seg[i].next = NULL; + d_segs[i] = &dbuff_seg[i]; + offset += bufsz; + } + + offset = 0; + for (i = 0; i < t->model[t->fid].info.nb_inputs; i++) { + bufsz = RTE_ALIGN_CEIL(t->model[t->fid].info.input_info[i].size, + t->cmn.dev_info.align_size); + qbuff_seg[i].addr = req->input + offset; + qbuff_seg[i].iova_addr = rte_mem_virt2iova(req->input + offset); + qbuff_seg[i].length = bufsz; + qbuff_seg[i].next = NULL; + q_segs[i] = &qbuff_seg[i]; + offset += bufsz; + } + } + /* quantize data */ - rte_ml_io_quantize(t->cmn.opt->dev_id, t->model[t->fid].id, t->model[t->fid].nb_batches, - t->model[t->fid].input, req->input); + rte_ml_io_quantize(t->cmn.opt->dev_id, t->model[t->fid].id, d_segs, q_segs); } int @@ -603,24 +753,39 @@ ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t uint32_t buff_size; uint32_t mz_size; size_t fsize; + uint32_t i; int ret; /* get input buffer size */ - ret = rte_ml_io_input_size_get(opt->dev_id, t->model[fid].id, t->model[fid].nb_batches, - &t->model[fid].inp_qsize, &t->model[fid].inp_dsize); - if (ret != 0) { - ml_err("Failed to get input size, model : %s\n", opt->filelist[fid].model); - return ret; + t->model[fid].inp_qsize = 0; + for (i = 0; i < t->model[fid].info.nb_inputs; i++) { + if (t->model[fid].info.io_layout == RTE_ML_IO_LAYOUT_PACKED) + t->model[fid].inp_qsize += t->model[fid].info.input_info[i].size; + else + t->model[fid].inp_qsize += RTE_ALIGN_CEIL( + t->model[fid].info.input_info[i].size, t->cmn.dev_info.align_size); } /* get output buffer size */ - ret = rte_ml_io_output_size_get(opt->dev_id, t->model[fid].id, t->model[fid].nb_batches, - &t->model[fid].out_qsize, &t->model[fid].out_dsize); - if (ret != 0) { - ml_err("Failed to get input size, model : %s\n", opt->filelist[fid].model); - return ret; + t->model[fid].out_qsize = 0; + for (i = 0; i < t->model[fid].info.nb_outputs; i++) { + if (t->model[fid].info.io_layout == RTE_ML_IO_LAYOUT_PACKED) + t->model[fid].out_qsize += t->model[fid].info.output_info[i].size; + else + t->model[fid].out_qsize += RTE_ALIGN_CEIL( + t->model[fid].info.output_info[i].size, t->cmn.dev_info.align_size); } + t->model[fid].inp_dsize = 0; + for (i = 0; i < t->model[fid].info.nb_inputs; i++) + t->model[fid].inp_dsize += + t->model[fid].info.input_info[i].nb_elements * sizeof(float); + + t->model[fid].out_dsize = 0; + for (i = 0; i < t->model[fid].info.nb_outputs; i++) + t->model[fid].out_dsize += + t->model[fid].info.output_info[i].nb_elements * sizeof(float); + /* allocate buffer for user data */ mz_size = t->model[fid].inp_dsize + t->model[fid].out_dsize; if (strcmp(opt->filelist[fid].reference, "\0") != 0) @@ -677,9 +842,9 @@ ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t /* create mempool for quantized input and output buffers. ml_request_initialize is * used as a callback for object creation. 
*/ - buff_size = RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.min_align_size) + - RTE_ALIGN_CEIL(t->model[fid].inp_qsize, t->cmn.dev_info.min_align_size) + - RTE_ALIGN_CEIL(t->model[fid].out_qsize, t->cmn.dev_info.min_align_size); + buff_size = RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.align_size) + + RTE_ALIGN_CEIL(t->model[fid].inp_qsize, t->cmn.dev_info.align_size) + + RTE_ALIGN_CEIL(t->model[fid].out_qsize, t->cmn.dev_info.align_size); nb_buffers = RTE_MIN((uint64_t)ML_TEST_MAX_POOL_SIZE, opt->repetitions); t->fid = fid; @@ -744,6 +909,18 @@ ml_inference_mem_setup(struct ml_test *test, struct ml_options *opt) return -ENOMEM; } + /* create buf_segs pool of with element of uint8_t. external buffers are attached to the + * buf_segs while queuing inference requests. + */ + t->buf_seg_pool = rte_mempool_create("ml_test_mbuf_pool", ML_TEST_MAX_POOL_SIZE * 2, + sizeof(struct rte_ml_buff_seg), 0, 0, NULL, NULL, NULL, + NULL, opt->socket_id, 0); + if (t->buf_seg_pool == NULL) { + ml_err("Failed to create buf_segs pool : %s\n", "ml_test_mbuf_pool"); + rte_ml_op_pool_free(t->op_pool); + return -ENOMEM; + } + return 0; } @@ -756,6 +933,9 @@ ml_inference_mem_destroy(struct ml_test *test, struct ml_options *opt) /* release op pool */ rte_mempool_free(t->op_pool); + + /* release buf_segs pool */ + rte_mempool_free(t->buf_seg_pool); } static bool @@ -763,9 +943,9 @@ ml_inference_validation(struct ml_test *test, struct ml_request *req) { struct test_inference *t = ml_test_priv((struct ml_test *)test); struct ml_model *model; - uint32_t nb_elements; - uint8_t *reference; - uint8_t *output; + float *reference; + float *output; + float deviation; bool match; uint32_t i; uint32_t j; @@ -777,89 +957,32 @@ ml_inference_validation(struct ml_test *test, struct ml_request *req) match = (rte_hash_crc(model->output, model->out_dsize, 0) == rte_hash_crc(model->reference, model->out_dsize, 0)); } else { - output = model->output; - reference = model->reference; + output = (float *)model->output; + reference = (float *)model->reference; i = 0; next_output: - nb_elements = - model->info.output_info[i].shape.w * model->info.output_info[i].shape.x * - model->info.output_info[i].shape.y * model->info.output_info[i].shape.z; j = 0; next_element: match = false; - switch (model->info.output_info[i].dtype) { - case RTE_ML_IO_TYPE_INT8: - if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, int8_t), - ML_TEST_READ_TYPE(reference, int8_t), - t->cmn.opt->tolerance)) - match = true; - - output += sizeof(int8_t); - reference += sizeof(int8_t); - break; - case RTE_ML_IO_TYPE_UINT8: - if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, uint8_t), - ML_TEST_READ_TYPE(reference, uint8_t), - t->cmn.opt->tolerance)) - match = true; - - output += sizeof(float); - reference += sizeof(float); - break; - case RTE_ML_IO_TYPE_INT16: - if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, int16_t), - ML_TEST_READ_TYPE(reference, int16_t), - t->cmn.opt->tolerance)) - match = true; - - output += sizeof(int16_t); - reference += sizeof(int16_t); - break; - case RTE_ML_IO_TYPE_UINT16: - if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, uint16_t), - ML_TEST_READ_TYPE(reference, uint16_t), - t->cmn.opt->tolerance)) - match = true; - - output += sizeof(uint16_t); - reference += sizeof(uint16_t); - break; - case RTE_ML_IO_TYPE_INT32: - if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, int32_t), - ML_TEST_READ_TYPE(reference, int32_t), - t->cmn.opt->tolerance)) - match = true; - - output += sizeof(int32_t); - reference += 
sizeof(int32_t); - break; - case RTE_ML_IO_TYPE_UINT32: - if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, uint32_t), - ML_TEST_READ_TYPE(reference, uint32_t), - t->cmn.opt->tolerance)) - match = true; - - output += sizeof(uint32_t); - reference += sizeof(uint32_t); - break; - case RTE_ML_IO_TYPE_FP32: - if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, float), - ML_TEST_READ_TYPE(reference, float), - t->cmn.opt->tolerance)) - match = true; - - output += sizeof(float); - reference += sizeof(float); - break; - default: /* other types, fp8, fp16, bfloat16 */ + if ((*reference == 0) && (*output == 0)) + deviation = 0; + else + deviation = 100 * fabs(*output - *reference) / fabs(*reference); + if (deviation <= t->cmn.opt->tolerance) match = true; - } + else + ml_err("id = %d, element = %d, output = %f, reference = %f, deviation = %f %%\n", + i, j, *output, *reference, deviation); + + output++; + reference++; if (!match) goto done; + j++; - if (j < nb_elements) + if (j < model->info.output_info[i].nb_elements) goto next_element; i++; @@ -880,14 +1003,59 @@ ml_request_finish(struct rte_mempool *mp, void *opaque, void *obj, unsigned int bool error = false; char *dump_path; + struct rte_ml_buff_seg qbuff_seg[ML_TEST_MAX_IO_SIZE]; + struct rte_ml_buff_seg dbuff_seg[ML_TEST_MAX_IO_SIZE]; + struct rte_ml_buff_seg *q_segs[ML_TEST_MAX_IO_SIZE]; + struct rte_ml_buff_seg *d_segs[ML_TEST_MAX_IO_SIZE]; + uint64_t offset; + uint64_t bufsz; + uint32_t i; + RTE_SET_USED(mp); if (req->niters == 0) return; t->nb_used++; - rte_ml_io_dequantize(t->cmn.opt->dev_id, model->id, t->model[req->fid].nb_batches, - req->output, model->output); + + if (t->model[req->fid].info.io_layout == RTE_ML_IO_LAYOUT_PACKED) { + qbuff_seg[0].addr = req->output; + qbuff_seg[0].iova_addr = rte_mem_virt2iova(req->output); + qbuff_seg[0].length = t->model[req->fid].out_qsize; + qbuff_seg[0].next = NULL; + q_segs[0] = &qbuff_seg[0]; + + dbuff_seg[0].addr = model->output; + dbuff_seg[0].iova_addr = rte_mem_virt2iova(model->output); + dbuff_seg[0].length = t->model[req->fid].out_dsize; + dbuff_seg[0].next = NULL; + d_segs[0] = &dbuff_seg[0]; + } else { + offset = 0; + for (i = 0; i < t->model[req->fid].info.nb_outputs; i++) { + bufsz = RTE_ALIGN_CEIL(t->model[req->fid].info.output_info[i].size, + t->cmn.dev_info.align_size); + qbuff_seg[i].addr = req->output + offset; + qbuff_seg[i].iova_addr = rte_mem_virt2iova(req->output + offset); + qbuff_seg[i].length = bufsz; + qbuff_seg[i].next = NULL; + q_segs[i] = &qbuff_seg[i]; + offset += bufsz; + } + + offset = 0; + for (i = 0; i < t->model[req->fid].info.nb_outputs; i++) { + bufsz = t->model[req->fid].info.output_info[i].nb_elements * sizeof(float); + dbuff_seg[i].addr = model->output + offset; + dbuff_seg[i].iova_addr = rte_mem_virt2iova(model->output + offset); + dbuff_seg[i].length = bufsz; + dbuff_seg[i].next = NULL; + d_segs[i] = &dbuff_seg[i]; + offset += bufsz; + } + } + + rte_ml_io_dequantize(t->cmn.opt->dev_id, model->id, q_segs, d_segs); if (model->reference == NULL) goto dump_output_pass; diff --git a/app/test-mldev/test_inference_common.h b/app/test-mldev/test_inference_common.h index 8f27af25e4f..3f4ba3219be 100644 --- a/app/test-mldev/test_inference_common.h +++ b/app/test-mldev/test_inference_common.h @@ -11,11 +11,16 @@ #include "test_model_common.h" +#define ML_TEST_MAX_IO_SIZE 32 + struct ml_request { uint8_t *input; uint8_t *output; uint16_t fid; uint64_t niters; + + struct rte_ml_buff_seg *inp_buf_segs[ML_TEST_MAX_IO_SIZE]; + struct rte_ml_buff_seg 
*out_buf_segs[ML_TEST_MAX_IO_SIZE]; }; struct ml_core_args { @@ -38,6 +43,7 @@ struct test_inference { /* test specific data */ struct ml_model model[ML_TEST_MAX_MODELS]; + struct rte_mempool *buf_seg_pool; struct rte_mempool *op_pool; uint64_t nb_used; diff --git a/app/test-mldev/test_model_common.c b/app/test-mldev/test_model_common.c index 8dbb0ff89ff..c517a506117 100644 --- a/app/test-mldev/test_model_common.c +++ b/app/test-mldev/test_model_common.c @@ -50,12 +50,6 @@ ml_model_load(struct ml_test *test, struct ml_options *opt, struct ml_model *mod return ret; } - /* Update number of batches */ - if (opt->batches == 0) - model->nb_batches = model->info.batch_size; - else - model->nb_batches = opt->batches; - model->state = MODEL_LOADED; return 0; diff --git a/app/test-mldev/test_model_common.h b/app/test-mldev/test_model_common.h index c1021ef1b6a..a207e54ab71 100644 --- a/app/test-mldev/test_model_common.h +++ b/app/test-mldev/test_model_common.h @@ -31,7 +31,6 @@ struct ml_model { uint8_t *reference; struct rte_mempool *io_pool; - uint32_t nb_batches; }; int ml_model_load(struct ml_test *test, struct ml_options *opt, struct ml_model *model, diff --git a/app/test-pipeline/pipeline_acl.c b/app/test-pipeline/pipeline_acl.c index 5857bc285f1..2f04868e3ef 100644 --- a/app/test-pipeline/pipeline_acl.c +++ b/app/test-pipeline/pipeline_acl.c @@ -188,9 +188,9 @@ app_main_loop_worker_pipeline_acl(void) { rule_params.field_value[SRC_FIELD_IPV4].value.u32 = 0; rule_params.field_value[SRC_FIELD_IPV4].mask_range.u32 = 0; rule_params.field_value[DST_FIELD_IPV4].value.u32 = - i << (24 - __builtin_popcount(app.n_ports - 1)); + i << (24 - rte_popcount32(app.n_ports - 1)); rule_params.field_value[DST_FIELD_IPV4].mask_range.u32 = - 8 + __builtin_popcount(app.n_ports - 1); + 8 + rte_popcount32(app.n_ports - 1); rule_params.field_value[SRCP_FIELD_IPV4].value.u16 = 0; rule_params.field_value[SRCP_FIELD_IPV4].mask_range.u16 = UINT16_MAX; diff --git a/app/test-pipeline/pipeline_lpm.c b/app/test-pipeline/pipeline_lpm.c index 8add5e71b7c..854319174b4 100644 --- a/app/test-pipeline/pipeline_lpm.c +++ b/app/test-pipeline/pipeline_lpm.c @@ -123,8 +123,8 @@ app_main_loop_worker_pipeline_lpm(void) { }; struct rte_table_lpm_key key = { - .ip = i << (24 - __builtin_popcount(app.n_ports - 1)), - .depth = 8 + __builtin_popcount(app.n_ports - 1), + .ip = i << (24 - rte_popcount32(app.n_ports - 1)), + .depth = 8 + rte_popcount32(app.n_ports - 1), }; struct rte_pipeline_table_entry *entry_ptr; diff --git a/app/test-pipeline/pipeline_lpm_ipv6.c b/app/test-pipeline/pipeline_lpm_ipv6.c index 26b325180da..18d4f018f1b 100644 --- a/app/test-pipeline/pipeline_lpm_ipv6.c +++ b/app/test-pipeline/pipeline_lpm_ipv6.c @@ -123,10 +123,10 @@ app_main_loop_worker_pipeline_lpm_ipv6(void) { uint32_t ip; int key_found, status; - key.depth = 8 + __builtin_popcount(app.n_ports - 1); + key.depth = 8 + rte_popcount32(app.n_ports - 1); ip = rte_bswap32(i << (24 - - __builtin_popcount(app.n_ports - 1))); + rte_popcount32(app.n_ports - 1))); memcpy(key.ip, &ip, sizeof(uint32_t)); printf("Adding rule to IPv6 LPM table (IPv6 destination = " diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index 0d0723f6596..679ca47b940 100644 --- a/app/test-pmd/cmdline.c +++ b/app/test-pmd/cmdline.c @@ -500,6 +500,15 @@ static void cmd_help_long_parsed(void *parsed_result, "mac_addr add port (port_id) vf (vf_id) (mac_address)\n" " Add a MAC address for a VF on the port.\n\n" + "mcast_addr add (port_id) (mcast_addr)\n" + " Add a multicast MAC addresses 
on port_id.\n\n" + + "mcast_addr remove (port_id) (mcast_addr)\n" + " Remove a multicast MAC address from port_id.\n\n" + + "mcast_addr flush (port_id)\n" + " Flush all multicast MAC addresses on port_id.\n\n" + "set vf mac addr (port_id) (vf_id) (XX:XX:XX:XX:XX:XX)\n" " Set the MAC address for a VF from the PF.\n\n" @@ -8561,6 +8570,45 @@ static cmdline_parse_inst_t cmd_mcast_addr = { }, }; +/* *** FLUSH MULTICAST MAC ADDRESS ON PORT *** */ +struct cmd_mcast_addr_flush_result { + cmdline_fixed_string_t mcast_addr_cmd; + cmdline_fixed_string_t what; + uint16_t port_num; +}; + +static void cmd_mcast_addr_flush_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_mcast_addr_flush_result *res = parsed_result; + + mcast_addr_flush(res->port_num); +} + +static cmdline_parse_token_string_t cmd_mcast_addr_flush_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_mcast_addr_result, + mcast_addr_cmd, "mcast_addr"); +static cmdline_parse_token_string_t cmd_mcast_addr_flush_what = + TOKEN_STRING_INITIALIZER(struct cmd_mcast_addr_result, what, + "flush"); +static cmdline_parse_token_num_t cmd_mcast_addr_flush_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_mcast_addr_result, port_num, + RTE_UINT16); + +static cmdline_parse_inst_t cmd_mcast_addr_flush = { + .f = cmd_mcast_addr_flush_parsed, + .data = (void *)0, + .help_str = "mcast_addr flush : " + "flush all multicast MAC addresses on port_id", + .tokens = { + (void *)&cmd_mcast_addr_flush_cmd, + (void *)&cmd_mcast_addr_flush_what, + (void *)&cmd_mcast_addr_flush_portnum, + NULL, + }, +}; + /* vf vlan anti spoof configuration */ /* Common result structure for vf vlan anti spoof */ @@ -10893,8 +10941,8 @@ print_rx_offloads(uint64_t offloads) if (offloads == 0) return; - begin = __builtin_ctzll(offloads); - end = sizeof(offloads) * CHAR_BIT - __builtin_clzll(offloads); + begin = rte_ctz64(offloads); + end = sizeof(offloads) * CHAR_BIT - rte_clz64(offloads); single_offload = 1ULL << begin; for (bit = begin; bit < end; bit++) { @@ -11312,8 +11360,8 @@ print_tx_offloads(uint64_t offloads) if (offloads == 0) return; - begin = __builtin_ctzll(offloads); - end = sizeof(offloads) * CHAR_BIT - __builtin_clzll(offloads); + begin = rte_ctz64(offloads); + end = sizeof(offloads) * CHAR_BIT - rte_clz64(offloads); single_offload = 1ULL << begin; for (bit = begin; bit < end; bit++) { @@ -12929,6 +12977,7 @@ static cmdline_parse_ctx_t builtin_ctx[] = { (cmdline_parse_inst_t *)&cmd_set_port_meter_stats_mask, (cmdline_parse_inst_t *)&cmd_show_port_meter_stats, (cmdline_parse_inst_t *)&cmd_mcast_addr, + (cmdline_parse_inst_t *)&cmd_mcast_addr_flush, (cmdline_parse_inst_t *)&cmd_set_vf_vlan_anti_spoof, (cmdline_parse_inst_t *)&cmd_set_vf_mac_anti_spoof, (cmdline_parse_inst_t *)&cmd_set_vf_vlan_stripq, diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c index 94827bcc4ad..6c8571154e5 100644 --- a/app/test-pmd/cmdline_flow.c +++ b/app/test-pmd/cmdline_flow.c @@ -86,6 +86,7 @@ enum index { PATTERN_TEMPLATE, ACTIONS_TEMPLATE, TABLE, + FLOW_GROUP, INDIRECT_ACTION, VALIDATE, CREATE, @@ -101,6 +102,7 @@ enum index { QUEUE, PUSH, PULL, + HASH, /* Flex arguments */ FLEX_ITEM_INIT, @@ -206,6 +208,18 @@ enum index { TABLE_PATTERN_TEMPLATE, TABLE_ACTIONS_TEMPLATE, + /* Group arguments */ + GROUP_ID, + GROUP_INGRESS, + GROUP_EGRESS, + GROUP_TRANSFER, + GROUP_SET_MISS_ACTIONS, + + /* Hash calculation arguments. */ + HASH_CALC_TABLE, + HASH_CALC_PATTERN_INDEX, + HASH_CALC_PATTERN, + /* Tunnel arguments. 
*/ TUNNEL_CREATE, TUNNEL_CREATE_TYPE, @@ -365,6 +379,7 @@ enum index { ITEM_MPLS_LABEL, ITEM_MPLS_TC, ITEM_MPLS_S, + ITEM_MPLS_TTL, ITEM_GRE, ITEM_GRE_PROTO, ITEM_GRE_C_RSVD0_VER, @@ -385,6 +400,7 @@ enum index { ITEM_GENEVE_OPTLEN, ITEM_VXLAN_GPE, ITEM_VXLAN_GPE_VNI, + ITEM_VXLAN_GPE_PROTO, ITEM_ARP_ETH_IPV4, ITEM_ARP_ETH_IPV4_SHA, ITEM_ARP_ETH_IPV4_SPA, @@ -524,6 +540,9 @@ enum index { ITEM_IB_BTH_PSN, ITEM_IPV6_PUSH_REMOVE_EXT, ITEM_IPV6_PUSH_REMOVE_EXT_TYPE, + ITEM_PTYPE, + ITEM_PTYPE_VALUE, + ITEM_NSH, /* Validate/create actions. */ ACTIONS, @@ -575,7 +594,6 @@ enum index { ACTION_METER_POLICY, ACTION_METER_POLICY_ID2PTR, ACTION_METER_COLOR_MODE, - ACTION_METER_INIT_COLOR, ACTION_METER_STATE, ACTION_OF_DEC_NW_TTL, ACTION_OF_POP_VLAN, @@ -937,6 +955,7 @@ static const char *const modify_field_ids[] = { "flex_item", "hash_result", "geneve_opt_type", "geneve_opt_class", "geneve_opt_data", "mpls", + "tcp_data_off", "ipv4_ihl", "ipv4_total_len", "ipv6_payload_len", NULL }; @@ -1293,6 +1312,14 @@ static const enum index next_at_destroy_attr[] = { ZERO, }; +static const enum index next_group_attr[] = { + GROUP_INGRESS, + GROUP_EGRESS, + GROUP_TRANSFER, + GROUP_SET_MISS_ACTIONS, + ZERO, +}; + static const enum index next_table_subcmd[] = { TABLE_CREATE, TABLE_DESTROY, @@ -1561,6 +1588,8 @@ static const enum index next_item[] = { ITEM_AGGR_AFFINITY, ITEM_TX_QUEUE, ITEM_IB_BTH, + ITEM_PTYPE, + ITEM_NSH, END_SET, ZERO, }; @@ -1712,6 +1741,7 @@ static const enum index item_mpls[] = { ITEM_MPLS_LABEL, ITEM_MPLS_TC, ITEM_MPLS_S, + ITEM_MPLS_TTL, ITEM_NEXT, ZERO, }; @@ -1758,6 +1788,7 @@ static const enum index item_geneve[] = { static const enum index item_vxlan_gpe[] = { ITEM_VXLAN_GPE_VNI, + ITEM_VXLAN_GPE_PROTO, ITEM_NEXT, ZERO, }; @@ -2079,6 +2110,17 @@ static const enum index item_ib_bth[] = { ZERO, }; +static const enum index item_ptype[] = { + ITEM_PTYPE_VALUE, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_nsh[] = { + ITEM_NEXT, + ZERO, +}; + static const enum index next_action[] = { ACTION_END, ACTION_VOID, @@ -2227,7 +2269,6 @@ static const enum index action_meter_mark[] = { ACTION_METER_PROFILE, ACTION_METER_POLICY, ACTION_METER_COLOR_MODE, - ACTION_METER_INIT_COLOR, ACTION_METER_STATE, ACTION_NEXT, ZERO, @@ -2678,6 +2719,12 @@ static int parse_push(struct context *, const struct token *, static int parse_pull(struct context *, const struct token *, const char *, unsigned int, void *, unsigned int); +static int parse_group(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_hash(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); static int parse_tunnel(struct context *, const struct token *, const char *, unsigned int, void *, unsigned int); @@ -3021,6 +3068,7 @@ static const struct token token_list[] = { PATTERN_TEMPLATE, ACTIONS_TEMPLATE, TABLE, + FLOW_GROUP, INDIRECT_ACTION, VALIDATE, CREATE, @@ -3035,7 +3083,8 @@ static const struct token token_list[] = { FLEX, QUEUE, PUSH, - PULL)), + PULL, + HASH)), .call = parse_init, }, /* Top-level command. */ @@ -3411,6 +3460,46 @@ static const struct token token_list[] = { .call = parse_table, }, /* Top-level command. */ + [FLOW_GROUP] = { + .name = "group", + .help = "manage flow groups", + .next = NEXT(NEXT_ENTRY(GROUP_ID), NEXT_ENTRY(COMMON_PORT_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, port)), + .call = parse_group, + }, + /* Sub-level commands. 
*/ + [GROUP_SET_MISS_ACTIONS] = { + .name = "set_miss_actions", + .help = "set group miss actions", + .next = NEXT(next_action), + .call = parse_group, + }, + /* Group arguments */ + [GROUP_ID] = { + .name = "group_id", + .help = "group id", + .next = NEXT(next_group_attr, NEXT_ENTRY(COMMON_GROUP_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, args.vc.attr.group)), + }, + [GROUP_INGRESS] = { + .name = "ingress", + .help = "group ingress attr", + .next = NEXT(next_group_attr), + .call = parse_group, + }, + [GROUP_EGRESS] = { + .name = "egress", + .help = "group egress attr", + .next = NEXT(next_group_attr), + .call = parse_group, + }, + [GROUP_TRANSFER] = { + .name = "transfer", + .help = "group transfer attr", + .next = NEXT(next_group_attr), + .call = parse_group, + }, + /* Top-level command. */ [QUEUE] = { .name = "queue", .help = "queue a flow rule operation", @@ -3680,6 +3769,33 @@ static const struct token token_list[] = { .args = ARGS(ARGS_ENTRY(struct buffer, queue)), }, /* Top-level command. */ + [HASH] = { + .name = "hash", + .help = "calculate hash for a given pattern in a given template table", + .next = NEXT(NEXT_ENTRY(HASH_CALC_TABLE), NEXT_ENTRY(COMMON_PORT_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, port)), + .call = parse_hash, + }, + /* Sub-level commands. */ + [HASH_CALC_TABLE] = { + .name = "template_table", + .help = "specify table id", + .next = NEXT(NEXT_ENTRY(HASH_CALC_PATTERN_INDEX), + NEXT_ENTRY(COMMON_TABLE_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, + args.vc.table_id)), + .call = parse_hash, + }, + [HASH_CALC_PATTERN_INDEX] = { + .name = "pattern_template", + .help = "specify pattern template id", + .next = NEXT(NEXT_ENTRY(ITEM_PATTERN), + NEXT_ENTRY(COMMON_UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct buffer, + args.vc.pat_templ_id)), + .call = parse_hash, + }, + /* Top-level command. 
*/ [INDIRECT_ACTION] = { .name = "indirect_action", .type = "{command} {port_id} [{arg} [...]]", @@ -4650,6 +4766,13 @@ static const struct token token_list[] = { label_tc_s, "\x00\x00\x01")), }, + [ITEM_MPLS_TTL] = { + .name = "ttl", + .help = "MPLS Time-to-Live", + .next = NEXT(item_mpls, NEXT_ENTRY(COMMON_UNSIGNED), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_mpls, ttl)), + }, [ITEM_GRE] = { .name = "gre", .help = "match GRE header", @@ -4804,6 +4927,14 @@ static const struct token token_list[] = { .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe, hdr.vni)), }, + [ITEM_VXLAN_GPE_PROTO] = { + .name = "protocol", + .help = "VXLAN-GPE next protocol", + .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(COMMON_UNSIGNED), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe, + protocol)), + }, [ITEM_ARP_ETH_IPV4] = { .name = "arp_eth_ipv4", .help = "match ARP header for Ethernet/IPv4", @@ -5827,6 +5958,30 @@ static const struct token token_list[] = { .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ib_bth, hdr.psn)), }, + [ITEM_PTYPE] = { + .name = "ptype", + .help = "match L2/L3/L4 and tunnel information", + .priv = PRIV_ITEM(PTYPE, + sizeof(struct rte_flow_item_ptype)), + .next = NEXT(item_ptype), + .call = parse_vc, + }, + [ITEM_PTYPE_VALUE] = { + .name = "packet_type", + .help = "packet type as defined in rte_mbuf_ptype", + .next = NEXT(item_ptype, NEXT_ENTRY(COMMON_UNSIGNED), + item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_ptype, packet_type)), + }, + [ITEM_NSH] = { + .name = "nsh", + .help = "match NSH header", + .priv = PRIV_ITEM(NSH, + sizeof(struct rte_flow_item_nsh)), + .next = NEXT(item_nsh), + .call = parse_vc, + }, + /* Validate/create actions. */ [ACTIONS] = { .name = "actions", @@ -6175,12 +6330,6 @@ static const struct token token_list[] = { .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter_mark, color_mode)), .call = parse_vc_conf, }, - [ACTION_METER_INIT_COLOR] = { - .name = "mtr_init_color", - .help = "meter initial color", - .next = NEXT(action_meter_mark, NEXT_ENTRY(ITEM_METER_COLOR_NAME)), - .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter_mark, init_color)), - }, [ACTION_METER_STATE] = { .name = "mtr_state", .help = "meter state", @@ -10449,6 +10598,96 @@ parse_pull(struct context *ctx, const struct token *token, return len; } +/** Parse tokens for hash calculation commands. */ +static int +parse_hash(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + if (!out->command) { + if (ctx->curr != HASH) + return -1; + if (sizeof(*out) > size) + return -1; + out->command = ctx->curr; + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + out->args.vc.data = (uint8_t *)out + size; + return len; + } + switch (ctx->curr) { + case HASH_CALC_TABLE: + case HASH_CALC_PATTERN_INDEX: + return len; + case ITEM_PATTERN: + out->args.vc.pattern = + (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1), + sizeof(double)); + ctx->object = out->args.vc.pattern; + ctx->objmask = NULL; + return len; + default: + return -1; + } +} + +static int +parse_group(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + + /* Token name must match. 
*/ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + if (!out->command) { + if (ctx->curr != FLOW_GROUP) + return -1; + if (sizeof(*out) > size) + return -1; + out->command = ctx->curr; + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + out->args.vc.data = (uint8_t *)out + size; + return len; + } + switch (ctx->curr) { + case GROUP_INGRESS: + out->args.vc.attr.ingress = 1; + return len; + case GROUP_EGRESS: + out->args.vc.attr.egress = 1; + return len; + case GROUP_TRANSFER: + out->args.vc.attr.transfer = 1; + return len; + case GROUP_SET_MISS_ACTIONS: + out->command = ctx->curr; + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + out->args.vc.actions = (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1), + sizeof(double)); + return len; + default: + return -1; + } +} + static int parse_flex(struct context *ctx, const struct token *token, const char *str, unsigned int len, @@ -12329,6 +12568,10 @@ cmd_flow_parsed(const struct buffer *in) in->args.table_destroy.table_id_n, in->args.table_destroy.table_id); break; + case GROUP_SET_MISS_ACTIONS: + port_queue_group_set_miss_actions(in->port, &in->args.vc.attr, + in->args.vc.actions); + break; case QUEUE_CREATE: port_queue_flow_create(in->port, in->queue, in->postpone, in->args.vc.table_id, in->args.vc.rule_id, @@ -12351,6 +12594,11 @@ cmd_flow_parsed(const struct buffer *in) case PULL: port_queue_flow_pull(in->port, in->queue); break; + case HASH: + port_flow_hash_calc(in->port, in->args.vc.table_id, + in->args.vc.pat_templ_id, + in->args.vc.pattern); + break; case QUEUE_AGED: port_queue_flow_aged(in->port, in->queue, in->args.aged.destroy); @@ -12689,6 +12937,9 @@ flow_item_default_mask(const struct rte_flow_item *item) case RTE_FLOW_ITEM_TYPE_IB_BTH: mask = &rte_flow_item_ib_bth_mask; break; + case RTE_FLOW_ITEM_TYPE_PTYPE: + mask = &rte_flow_item_ptype_mask; + break; default: break; } diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index 11f3a220483..b9fdb7e8f16 100644 --- a/app/test-pmd/config.c +++ b/app/test-pmd/config.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -699,8 +700,8 @@ print_dev_capabilities(uint64_t capabilities) if (capabilities == 0) return; - begin = __builtin_ctzll(capabilities); - end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); + begin = rte_ctz64(capabilities); + end = sizeof(capabilities) * CHAR_BIT - rte_clz64(capabilities); single_capa = 1ULL << begin; for (bit = begin; bit < end; bit++) { @@ -3179,7 +3180,6 @@ port_queue_action_handle_update(portid_t port_id, if (mtr_update.meter_mark.policy) mtr_update.policy_valid = 1; mtr_update.color_mode_valid = 1; - mtr_update.init_color_valid = 1; mtr_update.state_valid = 1; update = &mtr_update; break; @@ -3301,6 +3301,59 @@ port_queue_flow_push(portid_t port_id, queueid_t queue_id) return ret; } +/** Calculate the hash result for a given pattern in a given table. 
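The helper implemented just below is a thin wrapper around the new rte_flow_calc_table_hash() API. As a standalone illustration, not part of this patch (the pattern, the use of pattern template index 0 and the assumption that "table" was created earlier with rte_flow_template_table_create() are all example choices):

#include <stdio.h>
#include <rte_flow.h>

/* Sketch: ask the PMD which hash value "table" would compute for this
 * pattern, using pattern template index 0 of that table.
 */
static void
show_table_hash(uint16_t port_id, struct rte_flow_template_table *table)
{
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_error error;
        uint32_t hash;

        if (rte_flow_calc_table_hash(port_id, table, pattern, 0, &hash, &error) == 0)
                printf("hash = 0x%x\n", hash);
        else
                printf("hash calculation not supported on this port\n");
}

The corresponding testpmd command added by this patch is expected to read roughly "flow hash <port_id> template_table <table_id> pattern_template <index> pattern <items...> end", following the token chain defined earlier.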
*/ +int +port_flow_hash_calc(portid_t port_id, uint32_t table_id, + uint8_t pattern_template_index, const struct rte_flow_item pattern[]) +{ + uint32_t hash; + bool found; + struct port_table *pt; + struct rte_port *port; + struct rte_flow_error error; + int ret = 0; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + + found = false; + pt = port->table_list; + while (pt) { + if (table_id == pt->id) { + found = true; + break; + } + pt = pt->next; + } + if (!found) { + printf("Table #%u is invalid\n", table_id); + return -EINVAL; + } + + memset(&error, 0x55, sizeof(error)); + ret = rte_flow_calc_table_hash(port_id, pt->table, pattern, + pattern_template_index, &hash, &error); + if (ret < 0) { + printf("Failed to calculate hash "); + switch (abs(ret)) { + case ENODEV: + printf("no such device\n"); + break; + case ENOTSUP: + printf("device doesn't support this operation\n"); + break; + default: + printf("\n"); + break; + } + return ret; + } + printf("Hash results 0x%x\n", hash); + return 0; +} + /** Pull queue operation results from the queue. */ static int port_queue_aged_flow_destroy(portid_t port_id, queueid_t queue_id, @@ -3514,6 +3567,33 @@ port_queue_flow_pull(portid_t port_id, queueid_t queue_id) return ret; } +/* Set group miss actions */ +int +port_queue_group_set_miss_actions(portid_t port_id, const struct rte_flow_attr *attr, + const struct rte_flow_action *actions) +{ + struct rte_flow_group_attr gattr = { + .ingress = attr->ingress, + .egress = attr->egress, + .transfer = attr->transfer, + }; + struct rte_flow_error error; + int ret = 0; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + memset(&error, 0x66, sizeof(error)); + ret = rte_flow_group_set_miss_actions(port_id, attr->group, &gattr, actions, &error); + + if (ret < 0) + return port_flow_complain(&error); + + printf("Group #%u set miss actions succeeded\n", attr->group); + return ret; +} + /** Create flow rule. */ int port_flow_create(portid_t port_id, @@ -6802,6 +6882,24 @@ mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) mcast_addr_pool_append(port, mc_addr); } +void +mcast_addr_flush(portid_t port_id) +{ + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + ret = rte_eth_dev_set_mc_addr_list(port_id, NULL, 0); + if (ret != 0) { + fprintf(stderr, + "Failed to flush all multicast MAC addresses on port_id %u\n", + port_id); + return; + } + mcast_addr_pool_destroy(port_id); +} + void port_dcb_info_display(portid_t port_id) { diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build index d2e3f608927..719f875be0e 100644 --- a/app/test-pmd/meson.build +++ b/app/test-pmd/meson.build @@ -22,6 +22,7 @@ sources = files( 'macswap.c', 'noisy_vnf.c', 'parameters.c', + 'recycle_mbufs.c', 'rxonly.c', 'shared_rxq_fwd.c', 'testpmd.c', diff --git a/app/test-pmd/recycle_mbufs.c b/app/test-pmd/recycle_mbufs.c new file mode 100644 index 00000000000..6e9e1c5eb67 --- /dev/null +++ b/app/test-pmd/recycle_mbufs.c @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2023 Arm Limited. + */ + +#include "testpmd.h" + +/* + * Forwarding of packets in I/O mode. + * Enable mbufs recycle mode to recycle txq used mbufs + * for rxq mbuf ring. This can bypass mempool path and + * save CPU cycles. 
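For context: once registered in fwd_engines[] (first testpmd.c hunk below), this engine is selected like any other forwarding mode, typically with "set fwd recycle_mbufs" at the testpmd prompt. Mbuf recycling needs driver support; on PMDs without it, the rte_eth_recycle_rx_queue_info_get() call in the stream-init hook is expected to fail, which is why that path only logs a warning.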
+ */ +static bool +pkt_burst_recycle_mbufs(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + uint16_t nb_rx; + + /* Recycle used mbufs from the txq, and move these mbufs into + * the rxq mbuf ring. + */ + rte_eth_recycle_mbufs(fs->rx_port, fs->rx_queue, + fs->tx_port, fs->tx_queue, &(fs->recycle_rxq_info)); + + /* + * Receive a burst of packets and forward them. + */ + nb_rx = common_fwd_stream_receive(fs, pkts_burst, nb_pkt_per_burst); + if (unlikely(nb_rx == 0)) + return false; + + common_fwd_stream_transmit(fs, pkts_burst, nb_rx); + + return true; +} + +static void +recycle_mbufs_stream_init(struct fwd_stream *fs) +{ + int rc; + + /* Retrieve information about given ports's Rx queue + * for recycling mbufs. + */ + rc = rte_eth_recycle_rx_queue_info_get(fs->rx_port, + fs->rx_queue, &(fs->recycle_rxq_info)); + if (rc != 0) + TESTPMD_LOG(WARNING, + "Failed to get rx queue mbufs recycle info\n"); + + common_fwd_stream_init(fs); +} + +struct fwd_engine recycle_mbufs_engine = { + .fwd_mode_name = "recycle_mbufs", + .stream_init = recycle_mbufs_stream_init, + .packet_fwd = pkt_burst_recycle_mbufs, +}; diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 938ca035d4f..595b77748c2 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -199,6 +199,7 @@ struct fwd_engine * fwd_engines[] = { &icmp_echo_engine, &noisy_vnf_engine, &five_tuple_swap_fwd_engine, + &recycle_mbufs_engine, #ifdef RTE_LIBRTE_IEEE1588 &ieee1588_fwd_engine, #endif @@ -602,27 +603,27 @@ eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, } static int -change_bonding_slave_port_status(portid_t bond_pid, bool is_stop) +change_bonding_member_port_status(portid_t bond_pid, bool is_stop) { #ifdef RTE_NET_BOND - portid_t slave_pids[RTE_MAX_ETHPORTS]; + portid_t member_pids[RTE_MAX_ETHPORTS]; struct rte_port *port; - int num_slaves; - portid_t slave_pid; + int num_members; + portid_t member_pid; int i; - num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids, + num_members = rte_eth_bond_members_get(bond_pid, member_pids, RTE_MAX_ETHPORTS); - if (num_slaves < 0) { - fprintf(stderr, "Failed to get slave list for port = %u\n", + if (num_members < 0) { + fprintf(stderr, "Failed to get member list for port = %u\n", bond_pid); - return num_slaves; + return num_members; } - for (i = 0; i < num_slaves; i++) { - slave_pid = slave_pids[i]; - port = &ports[slave_pid]; + for (i = 0; i < num_members; i++) { + member_pid = member_pids[i]; + port = &ports[member_pid]; port->port_status = is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED; } @@ -646,12 +647,12 @@ eth_dev_start_mp(uint16_t port_id) struct rte_port *port = &ports[port_id]; /* - * Starting a bonded port also starts all slaves under the bonded + * Starting a bonding port also starts all members under the bonding * device. So if this port is bond device, we need to modify the - * port status of these slaves. + * port status of these members. */ if (port->bond_flag == 1) - return change_bonding_slave_port_status(port_id, false); + return change_bonding_member_port_status(port_id, false); } return 0; @@ -670,12 +671,12 @@ eth_dev_stop_mp(uint16_t port_id) struct rte_port *port = &ports[port_id]; /* - * Stopping a bonded port also stops all slaves under the bonded + * Stopping a bonding port also stops all members under the bonding * device. So if this port is bond device, we need to modify the - * port status of these slaves. + * port status of these members. 
*/ if (port->bond_flag == 1) - return change_bonding_slave_port_status(port_id, true); + return change_bonding_member_port_status(port_id, true); } return 0; @@ -2424,6 +2425,13 @@ update_rx_queue_state(uint16_t port_id, uint16_t queue_id) ports[port_id].rxq[queue_id].state = rx_qinfo.queue_state; } else if (rc == -ENOTSUP) { + /* + * Do not change the rxq state for the primary process, + * so that PMDs which do not implement + * rte_eth_rx_queue_info_get can keep forwarding as before. + */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + return; /* * Set the rxq state to RTE_ETH_QUEUE_STATE_STARTED * to ensure that the PMDs do not implement @@ -2449,6 +2457,13 @@ update_tx_queue_state(uint16_t port_id, uint16_t queue_id) ports[port_id].txq[queue_id].state = tx_qinfo.queue_state; } else if (rc == -ENOTSUP) { + /* + * Do not change the txq state for the primary process, + * so that PMDs which do not implement + * rte_eth_tx_queue_info_get can keep forwarding as before. + */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + return; /* * Set the txq state to RTE_ETH_QUEUE_STATE_STARTED * to ensure that the PMDs do not implement @@ -2463,12 +2478,15 @@ update_tx_queue_state(uint16_t port_id, uint16_t queue_id) } static void -update_queue_state(void) +update_queue_state(portid_t pid) { portid_t pi; queueid_t qi; RTE_ETH_FOREACH_DEV(pi) { + if (pid != pi && pid != (portid_t)RTE_PORT_ALL) + continue; + for (qi = 0; qi < nb_rxq; qi++) update_rx_queue_state(pi, qi); for (qi = 0; qi < nb_txq; qi++) @@ -2516,8 +2534,7 @@ start_packet_forwarding(int with_tx_first) return; if (stream_init != NULL) { - if (rte_eal_process_type() == RTE_PROC_SECONDARY) - update_queue_state(); + update_queue_state(RTE_PORT_ALL); for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) stream_init(fwd_streams[i]); } @@ -2624,7 +2641,7 @@ all_ports_started(void) port = &ports[pi]; /* Check if there is a port which is not started */ if ((port->port_status != RTE_PORT_STARTED) && - (port->slave_flag == 0)) + (port->member_flag == 0)) return 0; } @@ -2638,7 +2655,7 @@ port_is_stopped(portid_t port_id) struct rte_port *port = &ports[port_id]; if ((port->port_status != RTE_PORT_STOPPED) && - (port->slave_flag == 0)) + (port->member_flag == 0)) return 0; return 1; } @@ -2984,8 +3001,8 @@ fill_xstats_display_info(void) /* * Some capabilities (like, rx_offload_capa and tx_offload_capa) of bonding - * device in dev_info is zero when no slave is added. And its capability - * will be updated when add a new slave device. So adding a slave device need + * device in dev_info is zero when no member is added. And its capability + * will be updated when a new member device is added. So adding a member device needs * to update the port configurations of bonding device.
*/ static void @@ -3042,9 +3059,9 @@ start_port(portid_t pid) if (pid != pi && pid != (portid_t)RTE_PORT_ALL) continue; - if (port_is_bonding_slave(pi)) { + if (port_is_bonding_member(pi)) { fprintf(stderr, - "Please remove port %d from bonded device.\n", + "Please remove port %d from bonding device.\n", pi); continue; } @@ -3280,8 +3297,7 @@ start_port(portid_t pid) pl[cfg_pi++] = pi; } - if (rte_eal_process_type() == RTE_PROC_SECONDARY) - update_queue_state(); + update_queue_state(pi); if (at_least_one_port_successfully_started && !no_link_check) check_all_ports_link_status(RTE_PORT_ALL); @@ -3364,9 +3380,9 @@ stop_port(portid_t pid) continue; } - if (port_is_bonding_slave(pi)) { + if (port_is_bonding_member(pi)) { fprintf(stderr, - "Please remove port %d from bonded device.\n", + "Please remove port %d from bonding device.\n", pi); continue; } @@ -3453,28 +3469,28 @@ flush_port_owned_resources(portid_t pi) } static void -clear_bonding_slave_device(portid_t *slave_pids, uint16_t num_slaves) +clear_bonding_member_device(portid_t *member_pids, uint16_t num_members) { struct rte_port *port; - portid_t slave_pid; + portid_t member_pid; uint16_t i; - for (i = 0; i < num_slaves; i++) { - slave_pid = slave_pids[i]; - if (port_is_started(slave_pid) == 1) { - if (rte_eth_dev_stop(slave_pid) != 0) + for (i = 0; i < num_members; i++) { + member_pid = member_pids[i]; + if (port_is_started(member_pid) == 1) { + if (rte_eth_dev_stop(member_pid) != 0) fprintf(stderr, "rte_eth_dev_stop failed for port %u\n", - slave_pid); + member_pid); - port = &ports[slave_pid]; + port = &ports[member_pid]; port->port_status = RTE_PORT_STOPPED; } - clear_port_slave_flag(slave_pid); + clear_port_member_flag(member_pid); - /* Close slave device when testpmd quit or is killed. */ + /* Close member device when testpmd quit or is killed. */ if (cl_quit == 1 || f_quit == 1) - rte_eth_dev_close(slave_pid); + rte_eth_dev_close(member_pid); } } @@ -3483,8 +3499,8 @@ close_port(portid_t pid) { portid_t pi; struct rte_port *port; - portid_t slave_pids[RTE_MAX_ETHPORTS]; - int num_slaves = 0; + portid_t member_pids[RTE_MAX_ETHPORTS]; + int num_members = 0; if (port_id_is_invalid(pid, ENABLED_WARN)) return; @@ -3502,9 +3518,9 @@ close_port(portid_t pid) continue; } - if (port_is_bonding_slave(pi)) { + if (port_is_bonding_member(pi)) { fprintf(stderr, - "Please remove port %d from bonded device.\n", + "Please remove port %d from bonding device.\n", pi); continue; } @@ -3519,17 +3535,17 @@ close_port(portid_t pid) flush_port_owned_resources(pi); #ifdef RTE_NET_BOND if (port->bond_flag == 1) - num_slaves = rte_eth_bond_slaves_get(pi, - slave_pids, RTE_MAX_ETHPORTS); + num_members = rte_eth_bond_members_get(pi, + member_pids, RTE_MAX_ETHPORTS); #endif rte_eth_dev_close(pi); /* - * If this port is bonded device, all slaves under the + * If this port is bonding device, all members under the * device need to be removed or closed. 
*/ - if (port->bond_flag == 1 && num_slaves > 0) - clear_bonding_slave_device(slave_pids, - num_slaves); + if (port->bond_flag == 1 && num_members > 0) + clear_bonding_member_device(member_pids, + num_members); } free_xstats_display_info(pi); @@ -3569,9 +3585,9 @@ reset_port(portid_t pid) continue; } - if (port_is_bonding_slave(pi)) { + if (port_is_bonding_member(pi)) { fprintf(stderr, - "Please remove port %d from bonded device.\n", + "Please remove port %d from bonding device.\n", pi); continue; } @@ -4009,6 +4025,28 @@ register_eth_event_callback(void) return 0; } +static int +unregister_eth_event_callback(void) +{ + int ret; + enum rte_eth_event_type event; + + for (event = RTE_ETH_EVENT_UNKNOWN; + event < RTE_ETH_EVENT_MAX; event++) { + ret = rte_eth_dev_callback_unregister(RTE_ETH_ALL, + event, + eth_event_callback, + NULL); + if (ret != 0) { + TESTPMD_LOG(ERR, "Failed to unregister callback for " + "%s event\n", eth_event_desc[event]); + return -1; + } + } + + return 0; +} + /* This function is used by the interrupt thread */ static void dev_event_callback(const char *device_name, enum rte_dev_event_type type, @@ -4217,38 +4255,39 @@ init_port_config(void) } } -void set_port_slave_flag(portid_t slave_pid) +void set_port_member_flag(portid_t member_pid) { struct rte_port *port; - port = &ports[slave_pid]; - port->slave_flag = 1; + port = &ports[member_pid]; + port->member_flag = 1; } -void clear_port_slave_flag(portid_t slave_pid) +void clear_port_member_flag(portid_t member_pid) { struct rte_port *port; - port = &ports[slave_pid]; - port->slave_flag = 0; + port = &ports[member_pid]; + port->member_flag = 0; } -uint8_t port_is_bonding_slave(portid_t slave_pid) +uint8_t port_is_bonding_member(portid_t member_pid) { struct rte_port *port; struct rte_eth_dev_info dev_info; int ret; - port = &ports[slave_pid]; - ret = eth_dev_info_get_print_err(slave_pid, &dev_info); + port = &ports[member_pid]; + ret = eth_dev_info_get_print_err(member_pid, &dev_info); if (ret != 0) { TESTPMD_LOG(ERR, "Failed to get device info for port id %d," - "cannot determine if the port is a bonded slave", - slave_pid); + "cannot determine if the port is a bonding member", + member_pid); return 0; } - if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDING_MEMBER) || (port->slave_flag == 1)) + + if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDING_MEMBER) || (port->member_flag == 1)) return 1; return 0; } @@ -4737,6 +4776,11 @@ main(int argc, char** argv) rte_latencystats_uninit(); #endif + ret = unregister_eth_event_callback(); + if (ret != 0) + rte_exit(EXIT_FAILURE, "Cannot unregister for ethdev events"); + + ret = rte_eal_cleanup(); if (ret != 0) rte_exit(EXIT_FAILURE, diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index f1df6a8fafa..09a36b90b80 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -188,6 +188,8 @@ struct fwd_stream { struct pkt_burst_stats rx_burst_stats; struct pkt_burst_stats tx_burst_stats; struct fwd_lcore *lcore; /**< Lcore being scheduled. */ + /**< Rx queue information for recycling mbufs */ + struct rte_eth_recycle_rxq_info recycle_rxq_info; }; /** @@ -337,7 +339,7 @@ struct rte_port { uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */ queueid_t queue_nb; /**< nb. 
of queues for flow rules */ uint32_t queue_sz; /**< size of a queue for flow rules */ - uint8_t slave_flag : 1, /**< bonding slave port */ + uint8_t member_flag : 1, /**< bonding member port */ bond_flag : 1, /**< port is bond device */ fwd_mac_swap : 1, /**< swap packet MAC before forward */ update_conf : 1; /**< need to update bonding device configuration */ @@ -449,6 +451,7 @@ extern struct fwd_engine csum_fwd_engine; extern struct fwd_engine icmp_echo_engine; extern struct fwd_engine noisy_vnf_engine; extern struct fwd_engine five_tuple_swap_fwd_engine; +extern struct fwd_engine recycle_mbufs_engine; #ifdef RTE_LIBRTE_IEEE1588 extern struct fwd_engine ieee1588_fwd_engine; #endif @@ -979,6 +982,8 @@ int port_flow_template_table_create(portid_t port_id, uint32_t id, int port_flow_template_table_destroy(portid_t port_id, uint32_t n, const uint32_t *table); int port_flow_template_table_flush(portid_t port_id); +int port_queue_group_set_miss_actions(portid_t port_id, const struct rte_flow_attr *attr, + const struct rte_flow_action *actions); int port_queue_flow_create(portid_t port_id, queueid_t queue_id, bool postpone, uint32_t table_id, uint32_t rule_idx, uint32_t pattern_idx, uint32_t actions_idx, @@ -1009,6 +1014,8 @@ port_queue_action_handle_query_update(portid_t port_id, const struct rte_flow_action *action); int port_queue_flow_push(portid_t port_id, queueid_t queue_id); int port_queue_flow_pull(portid_t port_id, queueid_t queue_id); +int port_flow_hash_calc(portid_t port_id, uint32_t table_id, + uint8_t pattern_template_index, const struct rte_flow_item pattern[]); void port_queue_flow_aged(portid_t port_id, uint32_t queue_id, uint8_t destroy); int port_flow_validate(portid_t port_id, const struct rte_flow_attr *attr, @@ -1107,9 +1114,9 @@ void stop_packet_forwarding(void); void dev_set_link_up(portid_t pid); void dev_set_link_down(portid_t pid); void init_port_config(void); -void set_port_slave_flag(portid_t slave_pid); -void clear_port_slave_flag(portid_t slave_pid); -uint8_t port_is_bonding_slave(portid_t slave_pid); +void set_port_member_flag(portid_t member_pid); +void clear_port_member_flag(portid_t member_pid); +uint8_t port_is_bonding_member(portid_t member_pid); int init_port_dcb_config(portid_t pid, enum dcb_mode_enable dcb_mode, enum rte_eth_nb_tcs num_tcs, @@ -1176,6 +1183,7 @@ void show_mcast_macs(portid_t port_id); /* Functions to manage the set of filtered Multicast MAC addresses */ void mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr); void mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr); +void mcast_addr_flush(portid_t port_id); void port_dcb_info_display(portid_t port_id); uint8_t *open_file(const char *file_path, uint32_t *size); diff --git a/app/test-security-perf/test_security_perf.c b/app/test-security-perf/test_security_perf.c index 9bb50689e32..4dfaca48005 100644 --- a/app/test-security-perf/test_security_perf.c +++ b/app/test-security-perf/test_security_perf.c @@ -344,7 +344,7 @@ test_security_session_perf(void *arg) struct rte_security_session_conf sess_conf; int i, ret, nb_sessions, nb_sess_total; struct rte_security_session **sess; - struct rte_security_ctx *sec_ctx; + void *sec_ctx; double setup_rate, destroy_rate; uint64_t setup_ms, destroy_ms; struct lcore_conf *conf = arg; diff --git a/app/test/meson.build b/app/test/meson.build index 66897c14a39..4183d66b0e9 100644 --- a/app/test/meson.build +++ b/app/test/meson.build @@ -1,446 +1,245 @@ # SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2017 Intel Corporation 
- -if not get_option('tests') - subdir_done() -endif - -test_sources = files( - 'commands.c', - 'packet_burst_generator.c', - 'test.c', - 'test_acl.c', - 'test_alarm.c', - 'test_atomic.c', - 'test_barrier.c', - 'test_bitops.c', - 'test_bitmap.c', - 'test_bpf.c', - 'test_byteorder.c', - 'test_cksum.c', - 'test_cksum_perf.c', - 'test_cmdline.c', - 'test_cmdline_cirbuf.c', - 'test_cmdline_etheraddr.c', - 'test_cmdline_ipaddr.c', - 'test_cmdline_lib.c', - 'test_cmdline_num.c', - 'test_cmdline_portlist.c', - 'test_cmdline_string.c', - 'test_common.c', - 'test_cpuflags.c', - 'test_crc.c', - 'test_cryptodev.c', - 'test_cryptodev_asym.c', - 'test_cryptodev_blockcipher.c', - 'test_cryptodev_crosscheck.c', - 'test_cryptodev_security_ipsec.c', - 'test_cryptodev_security_pdcp.c', - 'test_cycles.c', - 'test_debug.c', - 'test_devargs.c', - 'test_distributor.c', - 'test_distributor_perf.c', - 'test_dmadev.c', - 'test_dmadev_api.c', - 'test_eal_flags.c', - 'test_eal_fs.c', - 'test_efd.c', - 'test_efd_perf.c', - 'test_errno.c', - 'test_ethdev_link.c', - 'test_event_crypto_adapter.c', - 'test_event_eth_rx_adapter.c', - 'test_event_ring.c', - 'test_event_timer_adapter.c', - 'test_eventdev.c', - 'test_external_mem.c', - 'test_fbarray.c', - 'test_fib.c', - 'test_fib_perf.c', - 'test_fib6.c', - 'test_fib6_perf.c', - 'test_func_reentrancy.c', - 'test_hash.c', - 'test_hash_functions.c', - 'test_hash_multiwriter.c', - 'test_hash_readwrite.c', - 'test_hash_perf.c', - 'test_hash_readwrite_lf_perf.c', - 'test_interrupts.c', - 'test_ipfrag.c', - 'test_ipsec.c', - 'test_ipsec_sad.c', - 'test_ipsec_perf.c', - 'test_kvargs.c', - 'test_lcores.c', - 'test_logs.c', - 'test_lpm.c', - 'test_lpm6.c', - 'test_lpm6_perf.c', - 'test_lpm_perf.c', - 'test_malloc.c', - 'test_malloc_perf.c', - 'test_mbuf.c', - 'test_member.c', - 'test_member_perf.c', - 'test_memcpy.c', - 'test_memcpy_perf.c', - 'test_memory.c', - 'test_mempool.c', - 'test_mempool_perf.c', - 'test_memzone.c', - 'test_meter.c', - 'test_mcslock.c', - 'test_mp_secondary.c', - 'test_per_lcore.c', - 'test_pflock.c', - 'test_pmd_perf.c', - 'test_power.c', - 'test_power_cpufreq.c', - 'test_power_kvm_vm.c', - 'test_power_intel_uncore.c', - 'test_prefetch.c', - 'test_rand_perf.c', - 'test_rawdev.c', - 'test_rcu_qsbr.c', - 'test_rcu_qsbr_perf.c', - 'test_reassembly_perf.c', - 'test_reciprocal_division.c', - 'test_reciprocal_division_perf.c', - 'test_red.c', - 'test_pie.c', - 'test_reorder.c', - 'test_rib.c', - 'test_rib6.c', - 'test_ring.c', - 'test_ring_mpmc_stress.c', - 'test_ring_hts_stress.c', - 'test_ring_mt_peek_stress.c', - 'test_ring_mt_peek_stress_zc.c', - 'test_ring_perf.c', - 'test_ring_rts_stress.c', - 'test_ring_st_peek_stress.c', - 'test_ring_st_peek_stress_zc.c', - 'test_ring_stress.c', - 'test_rwlock.c', - 'test_sched.c', - 'test_security.c', - 'test_security_inline_macsec.c', - 'test_security_inline_proto.c', - 'test_seqlock.c', - 'test_service_cores.c', - 'test_spinlock.c', - 'test_stack.c', - 'test_stack_perf.c', - 'test_string_fns.c', - 'test_tailq.c', - 'test_thash.c', - 'test_thash_perf.c', - 'test_threads.c', - 'test_timer.c', - 'test_timer_perf.c', - 'test_timer_racecond.c', - 'test_timer_secondary.c', - 'test_ticketlock.c', - 'test_trace.c', - 'test_trace_register.c', - 'test_trace_perf.c', - 'test_version.c', - 'virtual_pmd.c', -) - -test_deps = dpdk_libs_enabled -# as well as libs, the pci and vdev bus drivers are needed for a lot of tests -test_deps += ['bus_pci', 'bus_vdev'] - -# Each test is marked with flags: -# - the first flag indicates whether 
the test can run in no-huge mode, -# - the second flag indicates whether the test can run with ASan enabled, -fast_tests = [ - ['acl_autotest', true, true], - ['atomic_autotest', false, true], - ['bitmap_autotest', true, true], - ['bpf_autotest', true, true], - ['bpf_convert_autotest', true, true], - ['bitops_autotest', true, true], - ['byteorder_autotest', true, true], - ['cksum_autotest', true, true], - ['cmdline_autotest', true, true], - ['common_autotest', true, true], - ['cpuflags_autotest', true, true], - ['debug_autotest', true, true], - ['devargs_autotest', true, true], - ['eal_flags_c_opt_autotest', false, false], - ['eal_flags_main_opt_autotest', false, false], - ['eal_flags_n_opt_autotest', false, false], - ['eal_flags_hpet_autotest', false, false], - ['eal_flags_no_huge_autotest', false, false], - ['eal_flags_a_opt_autotest', false, false], - ['eal_flags_b_opt_autotest', false, false], - ['eal_flags_vdev_opt_autotest', false, false], - ['eal_flags_r_opt_autotest', false, false], - ['eal_flags_mem_autotest', false, false], - ['eal_flags_file_prefix_autotest', false, false], - ['eal_flags_misc_autotest', false, false], - ['eal_fs_autotest', true, true], - ['errno_autotest', true, true], - ['ethdev_link_status', true, true], - ['event_ring_autotest', true, true], - ['fib_autotest', true, true], - ['fib6_autotest', true, true], - ['func_reentrancy_autotest', false, true], - ['hash_autotest', true, true], - ['interrupt_autotest', true, true], - ['ipfrag_autotest', false, true], - ['lcores_autotest', true, true], - ['logs_autotest', true, true], - ['lpm_autotest', true, true], - ['lpm6_autotest', true, true], - ['malloc_autotest', false, true], - ['mbuf_autotest', false, true], - ['mcslock_autotest', false, true], - ['memcpy_autotest', true, true], - ['memory_autotest', false, true], - ['mempool_autotest', false, true], - ['memzone_autotest', false, true], - ['meter_autotest', true, true], - ['multiprocess_autotest', false, false], - ['per_lcore_autotest', true, true], - ['pflock_autotest', true, true], - ['prefetch_autotest', true, true], - ['rcu_qsbr_autotest', true, true], - ['pie_autotest', true, true], - ['rib_autotest', true, true], - ['rib6_autotest', true, true], - ['ring_autotest', true, true], - ['rwlock_test1_autotest', true, true], - ['rwlock_rda_autotest', true, true], - ['rwlock_rds_wrm_autotest', true, true], - ['rwlock_rde_wro_autotest', true, true], - ['sched_autotest', true, true], - ['security_autotest', false, true], - ['seqlock_autotest', true, true], - ['spinlock_autotest', true, true], - ['stack_autotest', false, true], - ['stack_lf_autotest', false, true], - ['string_autotest', true, true], - ['tailq_autotest', true, true], - ['ticketlock_autotest', true, true], - ['timer_autotest', false, true], - ['user_delay_us', true, true], - ['version_autotest', true, true], - ['crc_autotest', true, true], - ['distributor_autotest', false, true], - ['eventdev_common_autotest', true, true], - ['fbarray_autotest', true, true], - ['hash_readwrite_func_autotest', false, true], - ['ipsec_autotest', true, true], - ['kvargs_autotest', true, true], - ['member_autotest', true, true], - ['power_cpufreq_autotest', false, true], - ['power_autotest', true, true], - ['power_kvm_vm_autotest', false, true], - ['power_intel_uncore_autotest', true, true], - ['reorder_autotest', true, true], - ['service_autotest', true, true], - ['thash_autotest', true, true], - ['threads_autotest', true, true], - ['trace_autotest', true, true], -] - -# Tests known to have issues or which don't belong in 
other tests lists. -extra_test_names = [ - 'alarm_autotest', # ee00af60170b ("test: remove strict timing requirements some tests") - 'red_autotest', # https://bugs.dpdk.org/show_bug.cgi?id=826 -] - -perf_test_names = [ - 'ring_perf_autotest', - 'malloc_perf_autotest', - 'mempool_perf_autotest', - 'memcpy_perf_autotest', - 'hash_perf_autotest', - 'timer_perf_autotest', - 'reciprocal_division', - 'reciprocal_division_perf', - 'lpm_perf_autotest', - 'rib_slow_autotest', - 'fib_slow_autotest', - 'fib_perf_autotest', - 'red_all', - 'pie_all', - 'barrier_autotest', - 'hash_multiwriter_autotest', - 'timer_racecond_autotest', - 'efd_autotest', - 'hash_functions_autotest', - 'member_perf_autotest', - 'efd_perf_autotest', - 'lpm6_perf_autotest', - 'rib6_slow_autotest', - 'fib6_slow_autotest', - 'fib6_perf_autotest', - 'rcu_qsbr_perf_autotest', - 'red_perf', - 'pie_perf', - 'distributor_perf_autotest', - 'pmd_perf_autotest', - 'service_perf_autotest', - 'stack_perf_autotest', - 'stack_lf_perf_autotest', - 'rand_perf_autotest', - 'hash_readwrite_perf_autotest', - 'hash_readwrite_lf_perf_autotest', - 'trace_perf_autotest', - 'ipsec_perf_autotest', - 'thash_perf_autotest', - 'reassembly_perf_autotest', -] - -driver_test_names = [ - 'cryptodev_aesni_gcm_autotest', - 'cryptodev_aesni_mb_autotest', - 'cryptodev_chacha_poly_mb_autotest', - 'cryptodev_cn10k_autotest', - 'cryptodev_cn9k_autotest', - 'cryptodev_cpu_aesni_mb_autotest', - 'cryptodev_cpu_aesni_gcm_autotest', - 'cryptodev_dpaa2_sec_autotest', - 'cryptodev_dpaa_sec_autotest', - 'cryptodev_null_autotest', - 'cryptodev_openssl_autotest', - 'cryptodev_qat_autotest', - 'cryptodev_qat_asym_autotest', - 'cryptodev_qat_raw_api_autotest', - 'cryptodev_sw_armv8_autotest', - 'cryptodev_sw_kasumi_autotest', - 'cryptodev_sw_mvsam_autotest', - 'cryptodev_sw_snow3g_autotest', - 'cryptodev_sw_zuc_autotest', - 'cryptodev_uadk_autotest', - 'dmadev_autotest', -] - -dump_test_names = [] - -if not is_windows - driver_test_names += [ - 'cryptodev_openssl_asym_autotest', - 'eventdev_selftest_octeontx', - 'eventdev_selftest_sw', - ] - - dump_test_names += [ - 'dump_struct_sizes', - 'dump_mempool', - 'dump_malloc_stats', - 'dump_devargs', - 'dump_log_types', - 'dump_ring', - 'dump_physmem', - 'dump_memzone', - ] -endif - -# The following linkages are an exception to allow running the -# unit tests without requiring that the developer install the -# DPDK libraries. Explicit linkage of drivers (plugin libraries) -# in applications should not be used. -if dpdk_conf.has('RTE_MEMPOOL_RING') - test_deps += 'mempool_ring' -endif -if dpdk_conf.has('RTE_MEMPOOL_STACK') - test_deps += 'mempool_stack' -endif -if dpdk_conf.has('RTE_EVENT_SKELETON') - test_deps += 'event_skeleton' -endif - -if dpdk_conf.has('RTE_LIB_GRAPH') - test_sources += 'test_graph.c' - fast_tests += [['graph_autotest', true, true]] - fast_tests += [['node_list_dump', true, true]] - test_sources += 'test_graph_perf.c' - perf_test_names += 'graph_perf_autotest' -endif -if dpdk_conf.has('RTE_LIB_METRICS') - test_sources += ['test_metrics.c'] - fast_tests += [['metrics_autotest', true, true]] -endif -if not is_windows and dpdk_conf.has('RTE_LIB_TELEMETRY') - test_sources += ['test_telemetry_json.c', 'test_telemetry_data.c'] - fast_tests += [['telemetry_json_autotest', true, true]] - fast_tests += [['telemetry_data_autotest', true, true]] -endif -if dpdk_conf.has('RTE_LIB_PIPELINE') -# pipeline lib depends on port and table libs, so those must be present -# if pipeline library is. 
- test_sources += [ - 'test_table.c', - 'test_table_acl.c', - 'test_table_combined.c', - 'test_table_pipeline.c', - 'test_table_ports.c', - 'test_table_tables.c', - ] - fast_tests += [['table_autotest', true, true]] -endif - -# The following linkages of drivers are required because -# they are used via a driver-specific API. -if dpdk_conf.has('RTE_NET_BOND') - test_deps += 'net_bond' - test_sources += ['test_link_bonding.c', 'test_link_bonding_rssconf.c'] - driver_test_names += ['link_bonding_autotest', 'link_bonding_rssconf_autotest'] - if dpdk_conf.has('RTE_NET_RING') - test_sources += 'test_link_bonding_mode4.c' - driver_test_names += 'link_bonding_mode4_autotest' - endif -endif -if dpdk_conf.has('RTE_LIB_EVENTDEV') and dpdk_conf.has('RTE_NET_RING') - test_deps += 'net_ring' - test_sources += 'test_pmd_ring_perf.c' - test_sources += 'test_pmd_ring.c' - test_sources += 'test_event_eth_tx_adapter.c' - test_sources += 'sample_packet_forward.c' - fast_tests += [['ring_pmd_autotest', true, true]] - perf_test_names += 'ring_pmd_perf_autotest' - fast_tests += [['event_eth_tx_adapter_autotest', false, true]] - if dpdk_conf.has('RTE_LIB_BITRATESTATS') - test_sources += 'test_bitratestats.c' - fast_tests += [['bitratestats_autotest', true, true]] - endif - if dpdk_conf.has('RTE_LIB_LATENCYSTATS') - test_sources += 'test_latencystats.c' - fast_tests += [['latencystats_autotest', true, true]] +# Copyright(c) 2017-2023 Intel Corporation + +# the main test files [test.c and commands.c] relies on these libraries +deps += ['cmdline', 'ring', 'mempool', 'mbuf'] +sources += files('commands.c', 'test.c') + +# optional dependencies: some files may use these - and so we should link them in - +# but do not explicitly require them so they are not listed in the per-file lists below +optional_deps = ['crypto_scheduler'] + +# some other utility C files, providing functions used by various tests +# so we need to include these deps in the dependency list for the files using those fns. +packet_burst_generator_deps = ['net'] +sample_packet_forward_deps = ['net_ring', 'ethdev', 'bus_vdev'] +virtual_pmd_deps = ['ethdev', 'net', 'bus_pci'] +# test_cryptodev has material that other crypto tests need +test_cryptodev_deps = ['bus_vdev', 'net', 'cryptodev', 'security'] + +source_file_deps = { + # The C files providing functionality to other test cases + 'packet_burst_generator.c': packet_burst_generator_deps, +# 'resource.c': [], # unused currently. 
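Under this layout a new unit test no longer has to touch several hand-maintained lists: the source file declares its library dependencies in the source_file_deps dictionary and registers itself through the macros added to app/test/test.h later in this patch, and the per-suite test lists are then generated from the sources at configure time. A minimal, hypothetical example (the file name, flags and body are made up for illustration):

/* Hypothetical app/test/test_foo.c; it would also need a
 *     'test_foo.c': [],
 * entry in the source_file_deps dictionary.
 */
#include "test.h"

static int
test_foo(void)
{
        /* real checks would go here */
        return TEST_SUCCESS;
}

/* arguments: command name, runs without hugepages, runs under ASan, entry point */
REGISTER_FAST_TEST(foo_autotest, true, true, test_foo);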
+ 'sample_packet_forward.c': sample_packet_forward_deps, + 'virtual_pmd.c': virtual_pmd_deps, + + # the various test_*.c files + 'test_acl.c': ['net', 'acl'], + 'test_alarm.c': [], + 'test_atomic.c': ['hash'], + 'test_barrier.c': [], + 'test_bitcount.c': [], + 'test_bitmap.c': [], + 'test_bitops.c': [], + 'test_bitratestats.c': ['metrics', 'bitratestats', 'ethdev'] + sample_packet_forward_deps, + 'test_bpf.c': ['bpf', 'net'], + 'test_byteorder.c': [], +# 'test_cfgfile.c': ['cfgfile'], + 'test_cksum.c': ['net'], + 'test_cksum_perf.c': ['net'], + 'test_cmdline.c': [], + 'test_cmdline_cirbuf.c': [], + 'test_cmdline_etheraddr.c': ['net'], + 'test_cmdline_ipaddr.c': [], + 'test_cmdline_lib.c': [], + 'test_cmdline_num.c': [], + 'test_cmdline_portlist.c': [], + 'test_cmdline_string.c': [], + 'test_common.c': [], + 'test_compressdev.c': ['compressdev'], + 'test_cpuflags.c': [], + 'test_crc.c': ['net'], + 'test_cryptodev.c': test_cryptodev_deps, + 'test_cryptodev_asym.c': ['bus_vdev'] + test_cryptodev_deps, + 'test_cryptodev_blockcipher.c': test_cryptodev_deps, + 'test_cryptodev_crosscheck.c': test_cryptodev_deps, + 'test_cryptodev_security_ipsec.c': test_cryptodev_deps, + 'test_cryptodev_security_pdcp.c': test_cryptodev_deps, + 'test_cycles.c': [], + 'test_debug.c': [], + 'test_devargs.c': ['kvargs'], + 'test_dispatcher.c': ['dispatcher'], + 'test_distributor.c': ['distributor'], + 'test_distributor_perf.c': ['distributor'], + 'test_dmadev.c': ['dmadev', 'bus_vdev'], + 'test_dmadev_api.c': ['dmadev'], + 'test_eal_flags.c': [], + 'test_eal_fs.c': [], + 'test_efd.c': ['efd', 'net'], + 'test_efd_perf.c': ['efd', 'hash'], + 'test_errno.c': [], + 'test_ethdev_api.c': ['ethdev'], + 'test_ethdev_link.c': ['ethdev'], + 'test_event_crypto_adapter.c': ['cryptodev', 'eventdev', 'bus_vdev'], + 'test_event_dma_adapter.c': ['dmadev', 'eventdev', 'bus_vdev'], + 'test_event_eth_rx_adapter.c': ['ethdev', 'eventdev', 'bus_vdev'], + 'test_event_eth_tx_adapter.c': ['bus_vdev', 'ethdev', 'net_ring', 'eventdev'], + 'test_event_ring.c': ['eventdev'], + 'test_event_timer_adapter.c': ['ethdev', 'eventdev', 'bus_vdev'], + 'test_eventdev.c': ['eventdev', 'bus_vdev'], + 'test_external_mem.c': [], + 'test_fbarray.c': [], + 'test_fib.c': ['net', 'fib'], + 'test_fib6.c': ['rib', 'fib'], + 'test_fib6_perf.c': ['fib'], + 'test_fib_perf.c': ['net', 'fib'], + 'test_flow_classify.c': ['net', 'acl', 'table', 'ethdev', 'flow_classify'], + 'test_func_reentrancy.c': ['hash', 'lpm'], + 'test_graph.c': ['graph'], + 'test_graph_perf.c': ['graph'], + 'test_hash.c': ['net', 'hash'], + 'test_hash_functions.c': ['hash'], + 'test_hash_multiwriter.c': ['hash'], + 'test_hash_perf.c': ['hash'], + 'test_hash_readwrite.c': ['hash'], + 'test_hash_readwrite_lf_perf.c': ['hash'], + 'test_interrupts.c': [], + 'test_ipfrag.c': ['net', 'ip_frag'], + 'test_ipsec.c': ['bus_vdev', 'net', 'cryptodev', 'ipsec', 'security'], + 'test_ipsec_perf.c': ['net', 'ipsec'], + 'test_ipsec_sad.c': ['ipsec'], + 'test_kvargs.c': ['kvargs'], + 'test_latencystats.c': ['ethdev', 'latencystats', 'metrics'] + sample_packet_forward_deps, + 'test_lcores.c': [], + 'test_link_bonding.c': ['ethdev', 'net_bond', + 'net'] + packet_burst_generator_deps + virtual_pmd_deps, + 'test_link_bonding_mode4.c': ['ethdev', 'net_ring', 'net_bond', + 'net'] + packet_burst_generator_deps, + 'test_link_bonding_rssconf.c': ['ethdev', 'bus_vdev', 'net_bond'], + 'test_logs.c': [], + 'test_lpm.c': ['net', 'lpm'], + 'test_lpm6.c': ['lpm'], + 'test_lpm6_perf.c': ['lpm'], + 'test_lpm_perf.c': ['net', 
'lpm'], + 'test_malloc.c': [], + 'test_malloc_perf.c': [], + 'test_mbuf.c': ['net'], + 'test_mcslock.c': [], + 'test_member.c': ['member', 'net'], + 'test_member_perf.c': ['hash', 'member'], + 'test_memcpy.c': [], + 'test_memcpy_perf.c': [], + 'test_memory.c': [], + 'test_mempool.c': [], + 'test_mempool_perf.c': [], + 'test_memzone.c': [], + 'test_meter.c': ['meter'], + 'test_metrics.c': ['metrics'], + 'test_mp_secondary.c': ['hash', 'lpm'], + 'test_net_ether.c': ['net'], + 'test_pcapng.c': ['ethdev', 'net', 'pcapng'], + 'test_pdcp.c': ['eventdev', 'pdcp', 'net', 'timer', 'security'], + 'test_pdump.c': ['pdump'] + sample_packet_forward_deps, + 'test_per_lcore.c': [], + 'test_pflock.c': [], + 'test_pie.c': ['sched'], + 'test_pmd_perf.c': ['ethdev', 'net'] + packet_burst_generator_deps, + 'test_pmd_ring.c': ['net_ring', 'ethdev', 'bus_vdev'], + 'test_pmd_ring_perf.c': ['ethdev', 'net_ring', 'bus_vdev'], + 'test_power.c': ['power'], + 'test_power_cpufreq.c': ['power'], + 'test_power_intel_uncore.c': ['power'], + 'test_power_kvm_vm.c': ['power'], + 'test_prefetch.c': [], + 'test_rand_perf.c': [], + 'test_rawdev.c': ['rawdev', 'bus_vdev'], + 'test_rcu_qsbr.c': ['rcu', 'hash'], + 'test_rcu_qsbr_perf.c': ['rcu', 'hash'], + 'test_reassembly_perf.c': ['net', 'ip_frag'], + 'test_reciprocal_division.c': [], + 'test_reciprocal_division_perf.c': [], + 'test_red.c': ['sched'], + 'test_reorder.c': ['reorder'], +# 'test_resource.c': [], + 'test_rib.c': ['net', 'rib'], + 'test_rib6.c': ['net', 'rib'], + 'test_ring.c': [], + 'test_ring_hts_stress.c': [], + 'test_ring_mpmc_stress.c': [], + 'test_ring_mt_peek_stress.c': [], + 'test_ring_mt_peek_stress_zc.c': [], + 'test_ring_perf.c': [], + 'test_ring_rts_stress.c': [], + 'test_ring_st_peek_stress.c': [], + 'test_ring_st_peek_stress_zc.c': [], + 'test_ring_stress.c': [], + 'test_rwlock.c': [], + 'test_sched.c': ['net', 'sched'], + 'test_security.c': ['net', 'security'], + 'test_security_inline_macsec.c': ['ethdev', 'security'], + 'test_security_inline_proto.c': ['ethdev', 'security', 'eventdev'] + test_cryptodev_deps, + 'test_seqlock.c': [], + 'test_service_cores.c': [], + 'test_spinlock.c': [], + 'test_stack.c': ['stack'], + 'test_stack_perf.c': ['stack'], + 'test_string_fns.c': [], + 'test_table.c': ['table', 'pipeline', 'port'], + 'test_table_acl.c': ['net', 'table', 'pipeline', 'port'], + 'test_table_combined.c': ['table', 'pipeline', 'port'], + 'test_table_pipeline.c': ['pipeline', 'table', 'port'], + 'test_table_ports.c': ['table', 'pipeline', 'port'], + 'test_table_tables.c': ['table', 'pipeline', 'port'], + 'test_tailq.c': [], + 'test_telemetry_data.c': ['telemetry'], + 'test_telemetry_json.c': ['telemetry'], + 'test_thash.c': ['net', 'hash'], + 'test_thash_perf.c': ['hash'], + 'test_threads.c': [], + 'test_ticketlock.c': [], + 'test_timer.c': ['timer'], + 'test_timer_perf.c': ['timer'], + 'test_timer_racecond.c': ['timer'], + 'test_timer_secondary.c': ['timer'], + 'test_trace.c': [], + 'test_trace_perf.c': [], + 'test_trace_register.c': [], + 'test_vdev.c': ['kvargs', 'bus_vdev'], + 'test_version.c': [], +} + +source_file_ext_deps = { + 'test_compressdev.c': ['zlib'], + 'test_pcapng.c': ['pcap'], +} + +def_lib = get_option('default_library') +foreach f, f_deps : source_file_deps + has_deps = true + foreach d : f_deps + if not is_variable(def_lib + '_rte_' + d) + has_deps = false + break + else + # technically we might not need this dep, but adding it is harmless + if d not in deps + deps += d + endif + endif + endforeach + # check for any external 
dependencies for this file + if source_file_ext_deps.has_key(f) + foreach d: source_file_ext_deps.get(f) + dep = dependency(d, required: false, method: 'pkg-config') + if not dep.found() + message('Skipping test file @0@ due to missing external dependency @1@'.format(f, d)) + has_deps = false + else + ext_deps += dep + endif + endforeach endif - if dpdk_conf.has('RTE_LIB_PDUMP') - test_sources += 'test_pdump.c' - fast_tests += [['pdump_autotest', true, false]] + if has_deps + sources += files(f) endif -endif -if dpdk_conf.has('RTE_NET_NULL') - test_deps += 'net_null' - test_sources += 'test_vdev.c' - fast_tests += [['vdev_autotest', true, true]] -endif -if dpdk_conf.has('RTE_RAW_SKELETON') - test_deps += 'raw_skeleton' - fast_tests += [['rawdev_autotest', true, true]] -endif - -if dpdk_conf.has('RTE_HAS_LIBPCAP') - ext_deps += pcap_dep - if dpdk_conf.has('RTE_LIB_PCAPNG') - test_sources += 'test_pcapng.c' +endforeach +# add the optional dependencies +foreach d:optional_deps + if is_variable(def_lib + '_rte_' + d) and d not in deps + deps += d endif -endif - -if dpdk_conf.has('RTE_LIB_PDCP') - test_sources += 'test_pdcp.c' - fast_tests += [['pdcp_autotest', false, true]] -endif +endforeach if cc.has_argument('-Wno-format-truncation') cflags += '-Wno-format-truncation' @@ -450,154 +249,13 @@ endif cflags += '-fno-strict-aliasing' # Enable using internal APIs in unit tests -cflags += ['-DALLOW_INTERNAL_API'] - -test_dep_objs = [] -if dpdk_conf.has('RTE_LIB_COMPRESSDEV') - compress_test_dep = dependency('zlib', required: false, method: 'pkg-config') - if compress_test_dep.found() - test_dep_objs += compress_test_dep - test_sources += 'test_compressdev.c' - fast_tests += [['compressdev_autotest', false, true]] - endif -endif +cflags += '-DALLOW_INTERNAL_API' -if dpdk_conf.has('RTE_CRYPTO_SCHEDULER') - driver_test_names += 'cryptodev_scheduler_autotest' - test_deps += 'crypto_scheduler' -endif - -foreach d:test_deps - def_lib = get_option('default_library') - test_dep_objs += get_variable(def_lib + '_rte_' + d) -endforeach - -link_libs = [] -if get_option('default_library') == 'static' - link_libs = dpdk_static_libraries + dpdk_drivers -endif - -dpdk_test = executable('dpdk-test', - test_sources, - link_whole: link_libs, - dependencies: test_dep_objs + ext_deps, - c_args: cflags, - install_rpath: join_paths(get_option('prefix'), - driver_install_path), - install: true) - -has_hugepage = run_command(py3, files('has_hugepage.py'), check: true).stdout().strip() != '0' -message('hugepage availability: @0@'.format(has_hugepage)) - -# some perf tests (eg: memcpy perf autotest)take very long -# to complete, so timeout to 10 minutes -timeout_seconds = 600 -timeout_seconds_fast = 10 - -test_no_huge_args = ['--no-huge', '-m', '2048'] - -foreach arg : fast_tests - test_args = [] - run_test = true - if not has_hugepage - if arg[1] - test_args += test_no_huge_args - else - run_test = false - endif - endif - - if get_option('b_sanitize') == 'address' or get_option('b_sanitize') == 'address,undefined' - if not arg[2] - run_test = false - endif - endif - - if (get_option('default_library') == 'shared' and - arg[0] == 'event_eth_tx_adapter_autotest') - test_args += ['-d', dpdk_drivers_build_dir] - endif - if is_linux - test_args += ['--file-prefix=@0@'.format(arg[0])] - endif - - if run_test - test(arg[0], dpdk_test, - env : ['DPDK_TEST=' + arg[0]], - args : test_args, - timeout : timeout_seconds_fast, - is_parallel : false, - suite : 'fast-tests') - if not is_windows and arg[0] == 'trace_autotest' - test_args 
+= ['--trace=.*'] - test_args += ['--trace-dir=@0@'.format(meson.current_build_dir())] - test(arg[0] + '_with_traces', dpdk_test, - env : ['DPDK_TEST=' + arg[0]], - args : test_args, - timeout : timeout_seconds_fast, - is_parallel : false, - suite : 'fast-tests') - endif - endif -endforeach - -if not is_windows and dpdk_conf.has('RTE_LIB_TELEMETRY') - test_args = [dpdk_test] - test_args += test_no_huge_args - if get_option('default_library') == 'shared' - test_args += ['-d', dpdk_drivers_build_dir] - endif - if dpdk_conf.has('RTE_CRYPTO_NULL') - test_args += ['--vdev=crypto_null0'] - endif - if dpdk_conf.has('RTE_DMA_SKELETON') - test_args += ['--vdev=dma_skeleton0'] - endif - if dpdk_conf.has('RTE_EVENT_SKELETON') - test_args += ['--vdev=event_skeleton0'] - endif - if dpdk_conf.has('RTE_NET_NULL') - test_args += ['--vdev=net_null0'] - endif - if dpdk_conf.has('RTE_RAW_SKELETON') - test_args += ['--vdev=rawdev_skeleton0'] - endif - test_args += ['-a', '0000:00:00.0'] - test('telemetry_all', find_program('test_telemetry.sh'), - args: test_args, - timeout : timeout_seconds_fast, - is_parallel : false, - suite : 'fast-tests') +# create a symlink in the app/test directory for the binary, for backward compatibility +if not is_windows + custom_target('test_symlink', + output: 'dpdk-test', + command: ['ln', '-sf', '../dpdk-test', '@OUTPUT@'], + build_by_default: true, + install: false) endif - -foreach arg : perf_test_names - test(arg, dpdk_test, - env : ['DPDK_TEST=' + arg], - timeout : timeout_seconds, - is_parallel : false, - suite : 'perf-tests') -endforeach - -foreach arg : driver_test_names - test(arg, dpdk_test, - env : ['DPDK_TEST=' + arg], - timeout : timeout_seconds, - is_parallel : false, - suite : 'driver-tests') -endforeach - -foreach arg : dump_test_names - test(arg, dpdk_test, - env : ['DPDK_TEST=' + arg], - timeout : timeout_seconds, - is_parallel : false, - suite : 'debug-tests') -endforeach - -foreach arg : extra_test_names - test(arg, dpdk_test, - env : ['DPDK_TEST=' + arg], - timeout : timeout_seconds, - is_parallel : false, - suite : 'extra-tests') -endforeach diff --git a/app/test/process.h b/app/test/process.h index 1f073b9c5c2..af7bc3e0dee 100644 --- a/app/test/process.h +++ b/app/test/process.h @@ -28,8 +28,8 @@ #ifdef RTE_LIB_PDUMP #ifdef RTE_NET_RING -#include -extern void *send_pkts(void *empty); +#include +extern uint32_t send_pkts(void *empty); extern uint16_t flag_for_send_pkts; #endif #endif @@ -49,7 +49,7 @@ process_dup(const char *const argv[], int numargs, const char *env_value) char path[32]; #ifdef RTE_LIB_PDUMP #ifdef RTE_NET_RING - pthread_t thread; + rte_thread_t thread; int rc; #endif #endif @@ -136,7 +136,7 @@ process_dup(const char *const argv[], int numargs, const char *env_value) #ifdef RTE_LIB_PDUMP #ifdef RTE_NET_RING if ((strcmp(env_value, "run_pdump_server_tests") == 0)) { - rc = pthread_create(&thread, NULL, &send_pkts, NULL); + rc = rte_thread_create(&thread, NULL, send_pkts, NULL); if (rc != 0) { rte_panic("Cannot start send pkts thread: %s\n", strerror(rc)); @@ -151,7 +151,7 @@ process_dup(const char *const argv[], int numargs, const char *env_value) #ifdef RTE_NET_RING if ((strcmp(env_value, "run_pdump_server_tests") == 0)) { flag_for_send_pkts = 0; - pthread_join(thread, NULL); + rte_thread_join(thread, NULL); } #endif #endif diff --git a/app/test/suites/meson.build b/app/test/suites/meson.build new file mode 100644 index 00000000000..478f245a543 --- /dev/null +++ b/app/test/suites/meson.build @@ -0,0 +1,135 @@ +# SPDX-License-Identifier: 
BSD-3-Clause +# Copyright(c) 2023 Intel Corporation + +# some perf tests (eg: memcpy perf autotest) take a very long +# to complete, so timeout to 10 minutes +timeout_seconds = 600 +timeout_seconds_fast = 10 + +test_no_huge_args = ['--no-huge', '-m', '2048'] +has_hugepage = run_command(has_hugepages_cmd, check: true).stdout().strip() != '0' +message('hugepage availability: @0@'.format(has_hugepage)) + +# process source files to determine the different unit test suites +# - fast_tests +# - perf_tests +# - driver_tests +test_suites = run_command(get_test_suites_cmd, autotest_sources, + check: true).stdout().strip().split() +foreach suite:test_suites + # simple cases - tests without parameters or special handling + suite = suite.split('=') + suite_name = suite[0] + suite_tests = suite[1].split(',') + if suite_name == 'non_suite_tests' + # tests not in any suite + foreach t: suite_tests + if developer_mode + warning('Test "@0@" is not defined in any test suite'.format(t)) + endif + test(t, dpdk_test, + env: ['DPDK_TEST=' + t], + timeout: timeout_seconds, + is_parallel: false) + endforeach + elif suite_name != 'fast-tests' + # simple cases - tests without parameters or special handling + foreach t: suite_tests + test(t, dpdk_test, + env: ['DPDK_TEST=' + t], + timeout: timeout_seconds, + is_parallel: false, + suite: suite_name) + endforeach + else + # special fast-test handling here + foreach t: suite_tests + params = t.split(':') + test_name = params[0] + nohuge = params[1] == 'true' + asan = params[2] == 'true' + + test_args = [] + if nohuge + test_args += test_no_huge_args + elif not has_hugepage + continue #skip this tests + endif + if not asan and (get_option('b_sanitize') == 'address' + or get_option('b_sanitize') == 'address,undefined') + continue # skip this test + endif + + if get_option('default_library') == 'shared' + test_args += ['-d', dpdk_drivers_build_dir] + endif + + test(test_name, dpdk_test, + args : test_args, + env: ['DPDK_TEST=' + test_name], + timeout : timeout_seconds_fast, + is_parallel : false, + suite : 'fast-tests') + if not is_windows and test_name == 'trace_autotest' + test_args += ['--trace=.*'] + test_args += ['--trace-dir=@0@'.format(meson.current_build_dir())] + test(test_name + '_with_traces', dpdk_test, + args : test_args, + env: ['DPDK_TEST=' + test_name], + timeout : timeout_seconds_fast, + is_parallel : false, + suite : 'fast-tests') + endif + endforeach + endif +endforeach + +# standalone test for telemetry +if not is_windows and dpdk_conf.has('RTE_LIB_TELEMETRY') + test_args = [dpdk_test] + test_args += test_no_huge_args + if get_option('default_library') == 'shared' + test_args += ['-d', dpdk_drivers_build_dir] + endif + if dpdk_conf.has('RTE_CRYPTO_NULL') + test_args += ['--vdev=crypto_null0'] + endif + if dpdk_conf.has('RTE_DMA_SKELETON') + test_args += ['--vdev=dma_skeleton0'] + endif + if dpdk_conf.has('RTE_EVENT_SKELETON') + test_args += ['--vdev=event_skeleton0'] + endif + if dpdk_conf.has('RTE_NET_NULL') + test_args += ['--vdev=net_null0'] + endif + if dpdk_conf.has('RTE_RAW_SKELETON') + test_args += ['--vdev=rawdev_skeleton0'] + endif + test_args += ['-a', '0000:00:00.0'] + test('telemetry_all', find_program('test_telemetry.sh'), + args: test_args, + timeout : timeout_seconds_fast, + is_parallel : false, + suite : 'fast-tests') +endif + +# dump tests are defined in commands.c, and not easily extractable +dump_test_names = [ + 'dump_devargs', + 'dump_log_types', + 'dump_malloc_heaps', + 'dump_malloc_stats', + 'dump_mempool', + 'dump_memzone', + 
'dump_physmem', + 'dump_ring', + 'dump_struct_sizes', +] +foreach arg : dump_test_names + test(arg, dpdk_test, + env : ['DPDK_TEST=' + arg], + timeout : timeout_seconds_fast, + is_parallel : false, + suite : 'debug-tests') +endforeach diff --git a/app/test/test_telemetry.sh b/app/test/suites/test_telemetry.sh similarity index 100% rename from app/test/test_telemetry.sh rename to app/test/suites/test_telemetry.sh diff --git a/app/test/test.c b/app/test/test.c index fb073ff7959..bfa9ea52e35 100644 --- a/app/test/test.c +++ b/app/test/test.c @@ -193,6 +193,25 @@ main(int argc, char **argv) if (test_count > 0) { char buf[1024]; + char *dpdk_test_skip = getenv("DPDK_TEST_SKIP"); + char *skip_tests[128] = {0}; + size_t n_skip_tests = 0; + + if (dpdk_test_skip != NULL && strlen(dpdk_test_skip) > 0) { + int split_ret; + char *dpdk_test_skip_cp = strdup(dpdk_test_skip); + if (dpdk_test_skip_cp == NULL) { + ret = -1; + goto out; + } + dpdk_test_skip = dpdk_test_skip_cp; + split_ret = rte_strsplit(dpdk_test_skip, strlen(dpdk_test_skip), + skip_tests, RTE_DIM(skip_tests), ','); + if (split_ret > 0) + n_skip_tests = split_ret; + else + free(dpdk_test_skip); + } cl = cmdline_new(main_ctx, "RTE>>", 0, 1); if (cl == NULL) { @@ -201,6 +220,15 @@ main(int argc, char **argv) } for (i = 0; i < test_count; i++) { + /* check if test is to be skipped */ + for (size_t j = 0; j < n_skip_tests; j++) { + if (strcmp(tests[i], skip_tests[j]) == 0) { + fprintf(stderr, "Skipping %s [DPDK_TEST_SKIP]\n", tests[i]); + ret = TEST_SKIPPED; + goto end_of_cmd; + } + } + snprintf(buf, sizeof(buf), "%s\n", tests[i]); if (cmdline_parse_check(cl, buf) < 0) { printf("Error: invalid test command: '%s'\n", tests[i]); @@ -211,9 +239,13 @@ main(int argc, char **argv) } else ret = last_test_result; +end_of_cmd: if (ret != 0) break; } + if (n_skip_tests > 0) + free(dpdk_test_skip); + cmdline_free(cl); goto out; } else { diff --git a/app/test/test.h b/app/test/test.h index 85f57efbc66..15e23d297f8 100644 --- a/app/test/test.h +++ b/app/test/test.h @@ -12,6 +12,7 @@ #include #include +#include #define TEST_SUCCESS EXIT_SUCCESS #define TEST_FAILED -1 @@ -127,7 +128,7 @@ struct unit_test_case { { setup, teardown, NULL, testcase, #testcase, 1, data } #define TEST_CASE_NAMED_ST(name, setup, teardown, testcase) \ - { setup, teardown, NULL, testcase, name, 1, NULL } + { setup, teardown, testcase, NULL, name, 1, NULL } #define TEST_CASE_NAMED_WITH_DATA(name, setup, teardown, testcase, data) \ { setup, teardown, NULL, testcase, name, 1, data } @@ -190,7 +191,7 @@ struct test_command { void add_test_command(struct test_command *t); -/* Register a test function with its command string */ +/* Register a test function with its command string. Should not be used directly */ #define REGISTER_TEST_COMMAND(cmd, func) \ static struct test_command test_struct_##cmd = { \ .command = RTE_STR(cmd), \ @@ -201,4 +202,11 @@ void add_test_command(struct test_command *t); add_test_command(&test_struct_##cmd); \ } +/* Register a test function as a particular type. 
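The test.c hunk above also adds a DPDK_TEST_SKIP environment variable: a comma-separated list of test names (for example DPDK_TEST_SKIP=red_autotest,alarm_autotest) whose entries are reported as skipped instead of being run.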
+ * These can be used to build up test suites automatically + */ +#define REGISTER_FAST_TEST(cmd, no_huge, ASan, func) REGISTER_TEST_COMMAND(cmd, func) +#define REGISTER_PERF_TEST REGISTER_TEST_COMMAND +#define REGISTER_DRIVER_TEST REGISTER_TEST_COMMAND + #endif diff --git a/app/test/test_acl.c b/app/test/test_acl.c index 623f34682e6..8011639ddd1 100644 --- a/app/test/test_acl.c +++ b/app/test/test_acl.c @@ -1749,4 +1749,4 @@ test_acl(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(acl_autotest, test_acl); +REGISTER_FAST_TEST(acl_autotest, true, true, test_acl); diff --git a/app/test/test_atomic.c b/app/test/test_atomic.c index e4b997827ea..db07159e81a 100644 --- a/app/test/test_atomic.c +++ b/app/test/test_atomic.c @@ -631,4 +631,4 @@ test_atomic(void) return 0; } -REGISTER_TEST_COMMAND(atomic_autotest, test_atomic); +REGISTER_FAST_TEST(atomic_autotest, false, true, test_atomic); diff --git a/app/test/test_barrier.c b/app/test/test_barrier.c index ec69af25bff..925a88b68a9 100644 --- a/app/test/test_barrier.c +++ b/app/test/test_barrier.c @@ -285,4 +285,4 @@ test_barrier(void) return ret; } -REGISTER_TEST_COMMAND(barrier_autotest, test_barrier); +REGISTER_PERF_TEST(barrier_autotest, test_barrier); diff --git a/app/test/test_bitcount.c b/app/test/test_bitcount.c new file mode 100644 index 00000000000..83c68feb7bf --- /dev/null +++ b/app/test/test_bitcount.c @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2023 Microsoft Corporation + */ + +#include +#include + +#include +#include + +#include "test.h" + +RTE_LOG_REGISTER(bitcount_logtype_test, test.bitcount, INFO); + +static int +test_clz32(void) +{ + size_t leading; + uint32_t v = 0xffffffff; + + for (leading = 0; v; leading++) { + RTE_TEST_ASSERT(rte_clz32(v) == leading, + "Unexpected count."); + v >>= 1; + } + + return 0; +} + +static int +test_clz64(void) +{ + size_t leading; + uint64_t v = 0xffffffffffffffff; + + for (leading = 0; v; leading++) { + RTE_TEST_ASSERT(rte_clz64(v) == leading, + "Unexpected count."); + v >>= 1; + } + + return 0; +} + +static int +test_ctz32(void) +{ + size_t trailing; + uint32_t v = 1; + + for (trailing = 0; v; trailing++) { + RTE_TEST_ASSERT(rte_ctz32(v) == trailing, + "Unexpected count."); + v <<= 1; + } + + return 0; +} + +static int +test_ctz64(void) +{ + size_t trailing; + uint64_t v = 1; + + for (trailing = 0; v; trailing++) { + RTE_TEST_ASSERT(rte_ctz64(v) == trailing, + "Unexpected count."); + v <<= 1; + } + + return 0; +} + +static int +test_popcount32(void) +{ + size_t shift; + uint32_t v = 0; + const size_t bits = sizeof(v) * CHAR_BIT; + + for (shift = 0; shift < bits; shift++) { + RTE_TEST_ASSERT(rte_popcount32(v) == shift, + "Unexpected count."); + v <<= 1; + v |= 1; + } + + RTE_TEST_ASSERT(rte_popcount32(v) == bits, + "Unexpected count."); + + return 0; +} + +static int +test_popcount64(void) +{ + size_t shift; + uint64_t v = 0; + const size_t bits = sizeof(v) * CHAR_BIT; + + for (shift = 0; shift < bits; shift++) { + RTE_TEST_ASSERT(rte_popcount64(v) == shift, + "Unexpected count."); + v <<= 1; + v |= 1; + } + + RTE_TEST_ASSERT(rte_popcount64(v) == bits, + "Unexpected count."); + + return 0; +} + +static struct unit_test_suite bitcount_test_suite = { + .suite_name = "bitcount autotest", + .setup = NULL, + .teardown = NULL, + .unit_test_cases = { + TEST_CASE(test_clz32), + TEST_CASE(test_clz64), + TEST_CASE(test_ctz32), + TEST_CASE(test_ctz64), + TEST_CASE(test_popcount32), + TEST_CASE(test_popcount64), + TEST_CASES_END() + } +}; + +static int 
+test_bitcount(void) +{ + return unit_test_suite_runner(&bitcount_test_suite); +} + +REGISTER_FAST_TEST(bitcount_autotest, true, true, test_bitcount); diff --git a/app/test/test_bitmap.c b/app/test/test_bitmap.c index e9c61590aef..bab11812c76 100644 --- a/app/test/test_bitmap.c +++ b/app/test/test_bitmap.c @@ -91,7 +91,7 @@ test_bitmap_scan_operations(struct rte_bitmap *bmp) start_pos = pos; nb_set = 0; do { - nb_set += __builtin_popcountll(out_slab); + nb_set += rte_popcount64(out_slab); if (!rte_bitmap_scan(bmp, &pos, &out_slab)) break; } while (pos != start_pos); @@ -245,7 +245,7 @@ test_bitmap_all_set(void) printf("Failed with init bitmap.\n"); return TEST_FAILED; } - pos += (slab ? __builtin_ctzll(slab) : 0); + pos += (slab ? rte_ctz64(slab) : 0); rte_bitmap_clear(bmp, pos); } @@ -269,4 +269,4 @@ test_bitmap(void) return test_bitmap_all_set(); } -REGISTER_TEST_COMMAND(bitmap_autotest, test_bitmap); +REGISTER_FAST_TEST(bitmap_autotest, true, true, test_bitmap); diff --git a/app/test/test_bitops.c b/app/test/test_bitops.c index c21426bf2f2..0d4ccfb468b 100644 --- a/app/test/test_bitops.c +++ b/app/test/test_bitops.c @@ -135,4 +135,4 @@ test_bitops(void) return TEST_SUCCESS; } -REGISTER_TEST_COMMAND(bitops_autotest, test_bitops); +REGISTER_FAST_TEST(bitops_autotest, true, true, test_bitops); diff --git a/app/test/test_bitratestats.c b/app/test/test_bitratestats.c index 1ff540f4c48..926133de580 100644 --- a/app/test/test_bitratestats.c +++ b/app/test/test_bitratestats.c @@ -249,4 +249,4 @@ test_bitratestats(void) { return unit_test_suite_runner(&bitratestats_testsuite); } -REGISTER_TEST_COMMAND(bitratestats_autotest, test_bitratestats); +REGISTER_FAST_TEST(bitratestats_autotest, true, true, test_bitratestats); diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c index f5af5e8a3f4..f83e72a9adf 100644 --- a/app/test/test_bpf.c +++ b/app/test/test_bpf.c @@ -3262,7 +3262,7 @@ test_bpf(void) #endif /* !RTE_LIB_BPF */ -REGISTER_TEST_COMMAND(bpf_autotest, test_bpf); +REGISTER_FAST_TEST(bpf_autotest, true, true, test_bpf); #ifndef RTE_HAS_LIBPCAP @@ -3473,4 +3473,4 @@ test_bpf_convert(void) #endif /* RTE_HAS_LIBPCAP */ -REGISTER_TEST_COMMAND(bpf_convert_autotest, test_bpf_convert); +REGISTER_FAST_TEST(bpf_convert_autotest, true, true, test_bpf_convert); diff --git a/app/test/test_byteorder.c b/app/test/test_byteorder.c index de14ed539e4..67ca7ebbc83 100644 --- a/app/test/test_byteorder.c +++ b/app/test/test_byteorder.c @@ -63,4 +63,4 @@ test_byteorder(void) return 0; } -REGISTER_TEST_COMMAND(byteorder_autotest, test_byteorder); +REGISTER_FAST_TEST(byteorder_autotest, true, true, test_byteorder); diff --git a/app/test/test_cksum.c b/app/test/test_cksum.c index 6c15de9a931..f2ab5af5a79 100644 --- a/app/test/test_cksum.c +++ b/app/test/test_cksum.c @@ -267,4 +267,4 @@ test_cksum(void) } #undef GOTO_FAIL -REGISTER_TEST_COMMAND(cksum_autotest, test_cksum); +REGISTER_FAST_TEST(cksum_autotest, true, true, test_cksum); diff --git a/app/test/test_cmdline.c b/app/test/test_cmdline.c index 115bee966df..1d8020995c6 100644 --- a/app/test/test_cmdline.c +++ b/app/test/test_cmdline.c @@ -60,4 +60,4 @@ test_cmdline(void) return 0; } -REGISTER_TEST_COMMAND(cmdline_autotest, test_cmdline); +REGISTER_FAST_TEST(cmdline_autotest, true, true, test_cmdline); diff --git a/app/test/test_cmdline_etheraddr.c b/app/test/test_cmdline_etheraddr.c index 9691c32ba25..74953dea6cb 100644 --- a/app/test/test_cmdline_etheraddr.c +++ b/app/test/test_cmdline_etheraddr.c @@ -20,7 +20,7 @@ struct ether_addr_str { }; /* valid strings 
*/ -const struct ether_addr_str ether_addr_valid_strs[] = { +static const struct ether_addr_str ether_addr_valid_strs[] = { {"01:23:45:67:89:AB", 0xAB8967452301ULL}, {"4567:89AB:CDEF", 0xEFCDAB896745ULL}, }; @@ -30,7 +30,7 @@ const struct ether_addr_str ether_addr_valid_strs[] = { * end of token, which is either space chars, null char or * a hash sign. */ -const char * ether_addr_garbage_strs[] = { +static const char * const ether_addr_garbage_strs[] = { "00:11:22:33:44:55\0garbage", "00:11:22:33:44:55#garbage", "00:11:22:33:44:55 garbage", @@ -46,14 +46,13 @@ const char * ether_addr_garbage_strs[] = { #define GARBAGE_ETHERADDR 0x554433221100ULL /* corresponding address */ -const char * ether_addr_invalid_strs[] = { +static const char * const ether_addr_invalid_strs[] = { /* valid chars, invalid syntax */ "0123:45:67:89:AB", "01:23:4567:89:AB", "01:23:45:67:89AB", "012:345:678:9AB", "01:23:45:67:89:ABC", - "01:23:45:67:89:A", "01:23:45:67:89", "01:23:45:67:89:AB:CD", /* invalid chars, valid syntax */ @@ -61,7 +60,6 @@ const char * ether_addr_invalid_strs[] = { "INVA:LIDC:HARS", /* misc */ "01 23 45 67 89 AB", - "01.23.45.67.89.AB", "01,23,45,67,89,AB", "01:23:45\0:67:89:AB", "01:23:45#:67:89:AB", diff --git a/app/test/test_common.c b/app/test/test_common.c index f89e1eb7ee0..21eb2285e1b 100644 --- a/app/test/test_common.c +++ b/app/test/test_common.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -350,4 +351,4 @@ test_common(void) return ret; } -REGISTER_TEST_COMMAND(common_autotest, test_common); +REGISTER_FAST_TEST(common_autotest, true, true, test_common); diff --git a/app/test/test_compressdev.c b/app/test/test_compressdev.c index fbecaf9aaad..81b8e020066 100644 --- a/app/test/test_compressdev.c +++ b/app/test/test_compressdev.c @@ -4267,4 +4267,4 @@ test_compressdev(void) return unit_test_suite_runner(&compressdev_testsuite); } -REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev); +REGISTER_FAST_TEST(compressdev_autotest, false, true, test_compressdev); diff --git a/app/test/test_cpuflags.c b/app/test/test_cpuflags.c index a0e342ae481..a0ff74720c8 100644 --- a/app/test/test_cpuflags.c +++ b/app/test/test_cpuflags.c @@ -42,7 +42,6 @@ cpu_flag_result(int result) * =========== * * - Check flags from different registers with rte_cpu_get_flag_enabled() - * - Check if register and CPUID functions fail properly */ static int @@ -322,16 +321,7 @@ test_cpuflags(void) CHECK_FOR_FLAG(RTE_CPUFLAG_LBT_MIPS); #endif - /* - * Check if invalid data is handled properly - */ - printf("\nCheck for invalid flag:\t"); - result = rte_cpu_get_flag_enabled(RTE_CPUFLAG_NUMFLAGS); - printf("%s\n", cpu_flag_result(result)); - if (result != -ENOENT) - return -1; - return 0; } -REGISTER_TEST_COMMAND(cpuflags_autotest, test_cpuflags); +REGISTER_FAST_TEST(cpuflags_autotest, true, true, test_cpuflags); diff --git a/app/test/test_crc.c b/app/test/test_crc.c index 5edc8fb13b3..b85fca35fe2 100644 --- a/app/test/test_crc.c +++ b/app/test/test_crc.c @@ -171,4 +171,4 @@ test_crc(void) return 0; } -REGISTER_TEST_COMMAND(crc_autotest, test_crc); +REGISTER_FAST_TEST(crc_autotest, true, true, test_crc); diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c index fb2af40b99e..d2c4c6f8b59 100644 --- a/app/test/test_cryptodev.c +++ b/app/test/test_cryptodev.c @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -1426,6 +1427,93 @@ ut_setup_security(void) return dev_configure_and_start(0); } +static int +ut_setup_security_rx_inject(void) +{ + struct 
rte_mempool *mbuf_pool = rte_mempool_lookup("CRYPTO_MBUFPOOL"); + struct crypto_testsuite_params *ts_params = &testsuite_params; + struct rte_eth_conf port_conf = { + .rxmode = { + .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM | + RTE_ETH_RX_OFFLOAD_SECURITY, + }, + .txmode = { + .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, + }, + .lpbk_mode = 1, /* Enable loopback */ + }; + struct rte_cryptodev_info dev_info; + struct rte_eth_rxconf rx_conf = { + .rx_thresh = { + .pthresh = 8, + .hthresh = 8, + .wthresh = 8, + }, + .rx_free_thresh = 32, + }; + uint16_t nb_ports; + void *sec_ctx; + int ret; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_SECURITY_RX_INJECT) || + !(dev_info.feature_flags & RTE_CRYPTODEV_FF_SECURITY)) { + RTE_LOG(INFO, USER1, + "Feature requirements for IPsec Rx inject test case not met\n"); + return TEST_SKIPPED; + } + + sec_ctx = rte_cryptodev_get_sec_ctx(ts_params->valid_devs[0]); + if (sec_ctx == NULL) + return TEST_SKIPPED; + + nb_ports = rte_eth_dev_count_avail(); + if (nb_ports == 0) + return TEST_SKIPPED; + + ret = rte_eth_dev_configure(0 /* port_id */, + 1 /* nb_rx_queue */, + 0 /* nb_tx_queue */, + &port_conf); + if (ret) { + printf("Could not configure ethdev port 0 [err=%d]\n", ret); + return TEST_SKIPPED; + } + + /* Rx queue setup */ + ret = rte_eth_rx_queue_setup(0 /* port_id */, + 0 /* rx_queue_id */, + 1024 /* nb_rx_desc */, + SOCKET_ID_ANY, + &rx_conf, + mbuf_pool); + if (ret) { + printf("Could not setup eth port 0 queue 0\n"); + return TEST_SKIPPED; + } + + ret = rte_security_rx_inject_configure(sec_ctx, 0, true); + if (ret) { + printf("Could not enable Rx inject offload"); + return TEST_SKIPPED; + } + + ret = rte_eth_dev_start(0); + if (ret) { + printf("Could not start ethdev"); + return TEST_SKIPPED; + } + + ret = rte_eth_promiscuous_enable(0); + if (ret) { + printf("Could not enable promiscuous mode"); + return TEST_SKIPPED; + } + + /* Configure and start cryptodev with no features disabled */ + return dev_configure_and_start(0); +} + void ut_teardown(void) { @@ -1478,6 +1566,33 @@ ut_teardown(void) rte_cryptodev_stop(ts_params->valid_devs[0]); } +static void +ut_teardown_rx_inject(void) +{ + struct crypto_testsuite_params *ts_params = &testsuite_params; + void *sec_ctx; + int ret; + + if (rte_eth_dev_count_avail() != 0) { + ret = rte_eth_dev_reset(0); + if (ret) + printf("Could not reset eth port 0"); + + } + + ut_teardown(); + + sec_ctx = rte_cryptodev_get_sec_ctx(ts_params->valid_devs[0]); + if (sec_ctx == NULL) + return; + + ret = rte_security_rx_inject_configure(sec_ctx, 0, false); + if (ret) { + printf("Could not disable Rx inject offload"); + return; + } +} + static int test_device_configure_invalid_dev_id(void) { @@ -6394,6 +6509,9 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, tdata->digest.len) < 0) return TEST_SKIPPED; + if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) + return TEST_SKIPPED; + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; @@ -7829,6 +7947,9 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, if (global_api_test_type == CRYPTODEV_RAW_API_TEST) return TEST_SKIPPED; + if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) + return TEST_SKIPPED; + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; @@ -8925,9 +9046,7 @@ security_proto_supported(enum rte_security_session_action_type action, const 
struct rte_security_capability *capability; uint16_t i = 0; - struct rte_security_ctx *ctx = (struct rte_security_ctx *) - rte_cryptodev_get_sec_ctx( - ts_params->valid_devs[0]); + void *ctx = rte_cryptodev_get_sec_ctx(ts_params->valid_devs[0]); capabilities = rte_security_capabilities_get(ctx); @@ -8967,9 +9086,7 @@ static int test_pdcp_proto(int i, int oop, enum rte_crypto_cipher_operation opc, struct crypto_unittest_params *ut_params = &unittest_params; uint8_t *plaintext; int ret = TEST_SUCCESS; - struct rte_security_ctx *ctx = (struct rte_security_ctx *) - rte_cryptodev_get_sec_ctx( - ts_params->valid_devs[0]); + void *ctx = rte_cryptodev_get_sec_ctx(ts_params->valid_devs[0]); struct rte_cryptodev_info dev_info; uint64_t feat_flags; @@ -9174,9 +9291,7 @@ test_pdcp_proto_SGL(int i, int oop, unsigned int trn_data = 0; struct rte_cryptodev_info dev_info; uint64_t feat_flags; - struct rte_security_ctx *ctx = (struct rte_security_ctx *) - rte_cryptodev_get_sec_ctx( - ts_params->valid_devs[0]); + void *ctx = rte_cryptodev_get_sec_ctx(ts_params->valid_devs[0]); struct rte_mbuf *temp_mbuf; rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); @@ -9748,6 +9863,263 @@ test_PDCP_SDAP_PROTO_decap_all(void) return (all_err == TEST_SUCCESS) ? TEST_SUCCESS : TEST_FAILED; } +static inline void +ext_mbuf_callback_fn_free(void *addr __rte_unused, void *opaque __rte_unused) +{ +} + +static inline void +ext_mbuf_memzone_free(int nb_segs) +{ + int i; + + for (i = 0; i <= nb_segs; i++) { + char mz_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *memzone; + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "ext_buf_%d", i); + memzone = rte_memzone_lookup(mz_name); + if (memzone != NULL) { + rte_memzone_free(memzone); + memzone = NULL; + } + } +} + +static inline struct rte_mbuf * +ext_mbuf_create(struct rte_mempool *mbuf_pool, int pkt_len, + int nb_segs, const void *input_text) +{ + struct rte_mbuf *m = NULL, *mbuf = NULL; + size_t data_off = 0; + uint8_t *dst; + int i, size; + int t_len; + + if (pkt_len < 1) { + printf("Packet size must be 1 or more (is %d)\n", pkt_len); + return NULL; + } + + if (nb_segs < 1) { + printf("Number of segments must be 1 or more (is %d)\n", + nb_segs); + return NULL; + } + + t_len = pkt_len >= nb_segs ? 
pkt_len / nb_segs : 1; + size = pkt_len; + + /* Create chained mbuf_src with external buffer */ + for (i = 0; size > 0; i++) { + struct rte_mbuf_ext_shared_info *ret_shinfo = NULL; + uint16_t data_len = RTE_MIN(size, t_len); + char mz_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *memzone; + void *ext_buf_addr = NULL; + rte_iova_t buf_iova; + bool freed = false; + uint16_t buf_len; + + buf_len = RTE_ALIGN_CEIL(data_len + 1024 + + sizeof(struct rte_mbuf_ext_shared_info), 8); + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "ext_buf_%d", i); + memzone = rte_memzone_lookup(mz_name); + if (memzone != NULL && memzone->len != buf_len) { + rte_memzone_free(memzone); + memzone = NULL; + } + if (memzone == NULL) { + memzone = rte_memzone_reserve_aligned(mz_name, buf_len, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE); + if (memzone == NULL) { + printf("Can't allocate memory zone %s\n", mz_name); + return NULL; + } + } + + ext_buf_addr = memzone->addr; + memcpy(ext_buf_addr, RTE_PTR_ADD(input_text, data_off), data_len); + + /* Create buffer to hold rte_mbuf header */ + m = rte_pktmbuf_alloc(mbuf_pool); + if (i == 0) + mbuf = m; + + if (m == NULL) { + printf("Cannot create segment for source mbuf"); + goto fail; + } + + /* Save shared data (like callback function) in external buffer's end */ + ret_shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf_addr, &buf_len, + ext_mbuf_callback_fn_free, &freed); + if (ret_shinfo == NULL) { + printf("Shared mem initialization failed!\n"); + goto fail; + } + + buf_iova = rte_mem_virt2iova(ext_buf_addr); + + /* Attach external buffer to mbuf */ + rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len, + ret_shinfo); + if (m->ol_flags != RTE_MBUF_F_EXTERNAL) { + printf("External buffer is not attached to mbuf\n"); + goto fail; + } + + dst = (uint8_t *)rte_pktmbuf_append(m, data_len); + if (dst == NULL) { + printf("Cannot append %d bytes to the mbuf\n", data_len); + goto fail; + } + + if (mbuf != m) + rte_pktmbuf_chain(mbuf, m); + + size -= data_len; + data_off += data_len; + } + + return mbuf; + +fail: + rte_pktmbuf_free(mbuf); + ext_mbuf_memzone_free(nb_segs); + return NULL; +} + +static int +test_ipsec_proto_crypto_op_enq(struct crypto_testsuite_params *ts_params, + struct crypto_unittest_params *ut_params, + struct rte_security_ipsec_xform *ipsec_xform, + const struct ipsec_test_data *td, + const struct ipsec_test_flags *flags, + int pkt_num) +{ + uint8_t dev_id = ts_params->valid_devs[0]; + enum rte_security_ipsec_sa_direction dir; + int ret; + + dir = ipsec_xform->direction; + + /* Generate crypto op data structure */ + ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool, + RTE_CRYPTO_OP_TYPE_SYMMETRIC); + if (!ut_params->op) { + printf("Could not allocate crypto op"); + return TEST_FAILED; + } + + /* Attach session to operation */ + rte_security_attach_session(ut_params->op, ut_params->sec_session); + + /* Set crypto operation mbufs */ + ut_params->op->sym->m_src = ut_params->ibuf; + ut_params->op->sym->m_dst = NULL; + + /* Copy IV in crypto operation when IV generation is disabled */ + if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS && + ipsec_xform->options.iv_gen_disable == 1) { + uint8_t *iv = rte_crypto_op_ctod_offset(ut_params->op, + uint8_t *, + IV_OFFSET); + int len; + + if (td->aead) + len = td->xform.aead.aead.iv.length; + else if (td->aes_gmac) + len = td->xform.chain.auth.auth.iv.length; + else + len = td->xform.chain.cipher.cipher.iv.length; + + memcpy(iv, td->iv.data, len); + } + + /* Process crypto operation */ + 
process_crypto_request(dev_id, ut_params->op); + + ret = test_ipsec_status_check(td, ut_params->op, flags, dir, pkt_num); + + rte_crypto_op_free(ut_params->op); + ut_params->op = NULL; + + return ret; +} + +static int +test_ipsec_proto_mbuf_enq(struct crypto_testsuite_params *ts_params, + struct crypto_unittest_params *ut_params, + void *ctx) +{ + uint64_t timeout, userdata; + struct rte_ether_hdr *hdr; + struct rte_mbuf *m; + void **sec_sess; + int ret; + + RTE_SET_USED(ts_params); + + hdr = (void *)rte_pktmbuf_prepend(ut_params->ibuf, sizeof(struct rte_ether_hdr)); + hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + + ut_params->ibuf->l2_len = sizeof(struct rte_ether_hdr); + + sec_sess = &ut_params->sec_session; + ret = rte_security_inb_pkt_rx_inject(ctx, &ut_params->ibuf, sec_sess, 1); + + if (ret != 1) + return TEST_FAILED; + + ut_params->ibuf = NULL; + + /* Add a timeout for 1 s */ + timeout = rte_get_tsc_cycles() + rte_get_tsc_hz(); + + do { + /* Get packet from port 0, queue 0 */ + ret = rte_eth_rx_burst(0, 0, &m, 1); + } while ((ret == 0) && (rte_get_tsc_cycles() > timeout)); + + if (ret == 0) { + printf("Could not receive packets from ethdev\n"); + return TEST_FAILED; + } + + if (m == NULL) { + printf("Received mbuf is NULL\n"); + return TEST_FAILED; + } + + ut_params->ibuf = m; + + if (!(m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD)) { + printf("Received packet is not Rx security processed\n"); + return TEST_FAILED; + } + + if (m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) { + printf("Received packet has failed Rx security processing\n"); + return TEST_FAILED; + } + + /* + * 'ut_params' is set as userdata. Verify that the field is returned + * correctly. + */ + userdata = *(uint64_t *)rte_security_dynfield(m); + if (userdata != (uint64_t)ut_params) { + printf("Userdata retrieved not matching expected\n"); + return TEST_FAILED; + } + + /* Trim L2 header */ + rte_pktmbuf_adj(m, sizeof(struct rte_ether_hdr)); + + return TEST_SUCCESS; +} + static int test_ipsec_proto_process(const struct ipsec_test_data td[], struct ipsec_test_data res_d[], @@ -9772,7 +10144,7 @@ test_ipsec_proto_process(const struct ipsec_test_data td[], struct ipsec_test_data *res_d_tmp = NULL; uint8_t input_text[IPSEC_TEXT_MAX_LEN]; int salt_len, i, ret = TEST_SUCCESS; - struct rte_security_ctx *ctx; + void *ctx; uint32_t src, dst; uint32_t verify; @@ -9937,6 +10309,9 @@ test_ipsec_proto_process(const struct ipsec_test_data td[], } } + if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->rx_inject) + sess_conf.userdata = ut_params; + /* Create security session */ ut_params->sec_session = rte_security_session_create(ctx, &sess_conf, ts_params->session_mpool); @@ -9959,58 +10334,33 @@ test_ipsec_proto_process(const struct ipsec_test_data td[], /* Copy test data before modification */ memcpy(input_text, td[i].input_text.data, td[i].input_text.len); - if (test_ipsec_pkt_update(input_text, flags)) - return TEST_FAILED; - - /* Setup source mbuf payload */ - ut_params->ibuf = create_segmented_mbuf(ts_params->mbuf_pool, td[i].input_text.len, - nb_segs, 0); - pktmbuf_write(ut_params->ibuf, 0, td[i].input_text.len, input_text); - - /* Generate crypto op data structure */ - ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool, - RTE_CRYPTO_OP_TYPE_SYMMETRIC); - if (!ut_params->op) { - printf("TestCase %s line %d: %s\n", - __func__, __LINE__, - "failed to allocate crypto op"); + if (test_ipsec_pkt_update(input_text, flags)) { ret = TEST_FAILED; - goto crypto_op_free; + goto mbuf_free; } - /* Attach session to 
operation */ - rte_security_attach_session(ut_params->op, - ut_params->sec_session); - - /* Set crypto operation mbufs */ - ut_params->op->sym->m_src = ut_params->ibuf; - ut_params->op->sym->m_dst = NULL; - - /* Copy IV in crypto operation when IV generation is disabled */ - if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS && - ipsec_xform.options.iv_gen_disable == 1) { - uint8_t *iv = rte_crypto_op_ctod_offset(ut_params->op, - uint8_t *, - IV_OFFSET); - int len; - - if (td[i].aead) - len = td[i].xform.aead.aead.iv.length; - else if (td[i].aes_gmac) - len = td[i].xform.chain.auth.auth.iv.length; - else - len = td[i].xform.chain.cipher.cipher.iv.length; - - memcpy(iv, td[i].iv.data, len); + /* Setup source mbuf payload */ + if (flags->use_ext_mbuf) { + ut_params->ibuf = ext_mbuf_create(ts_params->mbuf_pool, + td[i].input_text.len, nb_segs, input_text); + } else { + ut_params->ibuf = create_segmented_mbuf(ts_params->mbuf_pool, + td[i].input_text.len, nb_segs, 0); + pktmbuf_write(ut_params->ibuf, 0, td[i].input_text.len, input_text); } - /* Process crypto operation */ - process_crypto_request(dev_id, ut_params->op); + if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->rx_inject) + ret = test_ipsec_proto_mbuf_enq(ts_params, ut_params, + ctx); + else + ret = test_ipsec_proto_crypto_op_enq(ts_params, + ut_params, + &ipsec_xform, + &td[i], flags, + i + 1); - ret = test_ipsec_status_check(&td[i], ut_params->op, flags, dir, - i + 1); if (ret != TEST_SUCCESS) - goto crypto_op_free; + goto mbuf_free; if (res_d != NULL) res_d_tmp = &res_d[i]; @@ -10018,23 +10368,20 @@ test_ipsec_proto_process(const struct ipsec_test_data td[], ret = test_ipsec_post_process(ut_params->ibuf, &td[i], res_d_tmp, silent, flags); if (ret != TEST_SUCCESS) - goto crypto_op_free; + goto mbuf_free; ret = test_ipsec_stats_verify(ctx, ut_params->sec_session, flags, dir); if (ret != TEST_SUCCESS) - goto crypto_op_free; - - rte_crypto_op_free(ut_params->op); - ut_params->op = NULL; + goto mbuf_free; rte_pktmbuf_free(ut_params->ibuf); ut_params->ibuf = NULL; } -crypto_op_free: - rte_crypto_op_free(ut_params->op); - ut_params->op = NULL; +mbuf_free: + if (flags->use_ext_mbuf) + ext_mbuf_memzone_free(nb_segs); rte_pktmbuf_free(ut_params->ibuf); ut_params->ibuf = NULL; @@ -10066,6 +10413,27 @@ test_ipsec_proto_known_vec(const void *test_data) return test_ipsec_proto_process(&td_outb, NULL, 1, false, &flags); } +static int +test_ipsec_proto_known_vec_ext_mbuf(const void *test_data) +{ + struct ipsec_test_data td_outb; + struct ipsec_test_flags flags; + + memset(&flags, 0, sizeof(flags)); + flags.use_ext_mbuf = true; + + memcpy(&td_outb, test_data, sizeof(td_outb)); + + if (td_outb.aes_gmac || td_outb.aead || + ((td_outb.ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_AH) && + (td_outb.xform.chain.cipher.cipher.algo != RTE_CRYPTO_CIPHER_NULL))) { + /* Disable IV gen to be able to test with known vectors */ + td_outb.ipsec_xform.options.iv_gen_disable = 1; + } + + return test_ipsec_proto_process(&td_outb, NULL, 1, false, &flags); +} + static int test_ipsec_proto_known_vec_inb(const void *test_data) { @@ -10100,6 +10468,24 @@ test_ipsec_proto_known_vec_fragmented(const void *test_data) return test_ipsec_proto_process(&td_outb, NULL, 1, false, &flags); } +static int +test_ipsec_proto_known_vec_inb_rx_inject(const void *test_data) +{ + const struct ipsec_test_data *td = test_data; + struct ipsec_test_flags flags; + struct ipsec_test_data td_inb; + + memset(&flags, 0, sizeof(flags)); + flags.rx_inject = true; + + if (td->ipsec_xform.direction 
== RTE_SECURITY_IPSEC_SA_DIR_EGRESS) + test_ipsec_td_in_from_out(td, &td_inb); + else + memcpy(&td_inb, td, sizeof(td_inb)); + + return test_ipsec_proto_process(&td_inb, NULL, 1, false, &flags); +} + static int test_ipsec_proto_all(const struct ipsec_test_flags *flags) { @@ -10219,7 +10605,7 @@ test_ipsec_ah_proto_all(const struct ipsec_test_flags *flags) } static int -test_ipsec_proto_display_list(const void *data __rte_unused) +test_ipsec_proto_display_list(void) { struct ipsec_test_flags flags; @@ -10231,7 +10617,7 @@ test_ipsec_proto_display_list(const void *data __rte_unused) } static int -test_ipsec_proto_ah_tunnel_ipv4(const void *data __rte_unused) +test_ipsec_proto_ah_tunnel_ipv4(void) { struct ipsec_test_flags flags; @@ -10244,7 +10630,7 @@ test_ipsec_proto_ah_tunnel_ipv4(const void *data __rte_unused) } static int -test_ipsec_proto_ah_transport_ipv4(const void *data __rte_unused) +test_ipsec_proto_ah_transport_ipv4(void) { struct ipsec_test_flags flags; @@ -10257,7 +10643,7 @@ test_ipsec_proto_ah_transport_ipv4(const void *data __rte_unused) } static int -test_ipsec_proto_iv_gen(const void *data __rte_unused) +test_ipsec_proto_iv_gen(void) { struct ipsec_test_flags flags; @@ -10269,7 +10655,7 @@ test_ipsec_proto_iv_gen(const void *data __rte_unused) } static int -test_ipsec_proto_sa_exp_pkts_soft(const void *data __rte_unused) +test_ipsec_proto_sa_exp_pkts_soft(void) { struct ipsec_test_flags flags; @@ -10281,7 +10667,7 @@ test_ipsec_proto_sa_exp_pkts_soft(const void *data __rte_unused) } static int -test_ipsec_proto_sa_exp_pkts_hard(const void *data __rte_unused) +test_ipsec_proto_sa_exp_pkts_hard(void) { struct ipsec_test_flags flags; @@ -10293,7 +10679,7 @@ test_ipsec_proto_sa_exp_pkts_hard(const void *data __rte_unused) } static int -test_ipsec_proto_err_icv_corrupt(const void *data __rte_unused) +test_ipsec_proto_err_icv_corrupt(void) { struct ipsec_test_flags flags; @@ -10305,7 +10691,7 @@ test_ipsec_proto_err_icv_corrupt(const void *data __rte_unused) } static int -test_ipsec_proto_udp_encap_custom_ports(const void *data __rte_unused) +test_ipsec_proto_udp_encap_custom_ports(void) { struct ipsec_test_flags flags; @@ -10322,7 +10708,7 @@ test_ipsec_proto_udp_encap_custom_ports(const void *data __rte_unused) } static int -test_ipsec_proto_udp_encap(const void *data __rte_unused) +test_ipsec_proto_udp_encap(void) { struct ipsec_test_flags flags; @@ -10334,7 +10720,7 @@ test_ipsec_proto_udp_encap(const void *data __rte_unused) } static int -test_ipsec_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused) +test_ipsec_proto_tunnel_src_dst_addr_verify(void) { struct ipsec_test_flags flags; @@ -10346,7 +10732,7 @@ test_ipsec_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused) } static int -test_ipsec_proto_tunnel_dst_addr_verify(const void *data __rte_unused) +test_ipsec_proto_tunnel_dst_addr_verify(void) { struct ipsec_test_flags flags; @@ -10358,7 +10744,7 @@ test_ipsec_proto_tunnel_dst_addr_verify(const void *data __rte_unused) } static int -test_ipsec_proto_udp_ports_verify(const void *data __rte_unused) +test_ipsec_proto_udp_ports_verify(void) { struct ipsec_test_flags flags; @@ -10371,7 +10757,7 @@ test_ipsec_proto_udp_ports_verify(const void *data __rte_unused) } static int -test_ipsec_proto_inner_ip_csum(const void *data __rte_unused) +test_ipsec_proto_inner_ip_csum(void) { struct ipsec_test_flags flags; @@ -10383,7 +10769,7 @@ test_ipsec_proto_inner_ip_csum(const void *data __rte_unused) } static int -test_ipsec_proto_inner_l4_csum(const void *data 
__rte_unused) +test_ipsec_proto_inner_l4_csum(void) { struct ipsec_test_flags flags; @@ -10395,7 +10781,7 @@ test_ipsec_proto_inner_l4_csum(const void *data __rte_unused) } static int -test_ipsec_proto_tunnel_v4_in_v4(const void *data __rte_unused) +test_ipsec_proto_tunnel_v4_in_v4(void) { struct ipsec_test_flags flags; @@ -10408,7 +10794,7 @@ test_ipsec_proto_tunnel_v4_in_v4(const void *data __rte_unused) } static int -test_ipsec_proto_tunnel_v6_in_v6(const void *data __rte_unused) +test_ipsec_proto_tunnel_v6_in_v6(void) { struct ipsec_test_flags flags; @@ -10421,7 +10807,7 @@ test_ipsec_proto_tunnel_v6_in_v6(const void *data __rte_unused) } static int -test_ipsec_proto_tunnel_v4_in_v6(const void *data __rte_unused) +test_ipsec_proto_tunnel_v4_in_v6(void) { struct ipsec_test_flags flags; @@ -10434,7 +10820,7 @@ test_ipsec_proto_tunnel_v4_in_v6(const void *data __rte_unused) } static int -test_ipsec_proto_tunnel_v6_in_v4(const void *data __rte_unused) +test_ipsec_proto_tunnel_v6_in_v4(void) { struct ipsec_test_flags flags; @@ -10447,7 +10833,7 @@ test_ipsec_proto_tunnel_v6_in_v4(const void *data __rte_unused) } static int -test_ipsec_proto_transport_v4(const void *data __rte_unused) +test_ipsec_proto_transport_v4(void) { struct ipsec_test_flags flags; @@ -10460,7 +10846,7 @@ test_ipsec_proto_transport_v4(const void *data __rte_unused) } static int -test_ipsec_proto_transport_l4_csum(const void *data __rte_unused) +test_ipsec_proto_transport_l4_csum(void) { struct ipsec_test_flags flags = { .l4_csum = true, @@ -10471,7 +10857,7 @@ test_ipsec_proto_transport_l4_csum(const void *data __rte_unused) } static int -test_ipsec_proto_stats(const void *data __rte_unused) +test_ipsec_proto_stats(void) { struct ipsec_test_flags flags; @@ -10483,7 +10869,7 @@ test_ipsec_proto_stats(const void *data __rte_unused) } static int -test_ipsec_proto_pkt_fragment(const void *data __rte_unused) +test_ipsec_proto_pkt_fragment(void) { struct ipsec_test_flags flags; @@ -10496,7 +10882,7 @@ test_ipsec_proto_pkt_fragment(const void *data __rte_unused) } static int -test_ipsec_proto_copy_df_inner_0(const void *data __rte_unused) +test_ipsec_proto_copy_df_inner_0(void) { struct ipsec_test_flags flags; @@ -10508,7 +10894,7 @@ test_ipsec_proto_copy_df_inner_0(const void *data __rte_unused) } static int -test_ipsec_proto_copy_df_inner_1(const void *data __rte_unused) +test_ipsec_proto_copy_df_inner_1(void) { struct ipsec_test_flags flags; @@ -10520,7 +10906,7 @@ test_ipsec_proto_copy_df_inner_1(const void *data __rte_unused) } static int -test_ipsec_proto_set_df_0_inner_1(const void *data __rte_unused) +test_ipsec_proto_set_df_0_inner_1(void) { struct ipsec_test_flags flags; @@ -10532,7 +10918,7 @@ test_ipsec_proto_set_df_0_inner_1(const void *data __rte_unused) } static int -test_ipsec_proto_set_df_1_inner_0(const void *data __rte_unused) +test_ipsec_proto_set_df_1_inner_0(void) { struct ipsec_test_flags flags; @@ -10544,7 +10930,7 @@ test_ipsec_proto_set_df_1_inner_0(const void *data __rte_unused) } static int -test_ipsec_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused) +test_ipsec_proto_ipv4_copy_dscp_inner_0(void) { struct ipsec_test_flags flags; @@ -10556,7 +10942,7 @@ test_ipsec_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused) } static int -test_ipsec_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused) +test_ipsec_proto_ipv4_copy_dscp_inner_1(void) { struct ipsec_test_flags flags; @@ -10568,7 +10954,7 @@ test_ipsec_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused) } static 
int -test_ipsec_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused) +test_ipsec_proto_ipv4_set_dscp_0_inner_1(void) { struct ipsec_test_flags flags; @@ -10584,7 +10970,7 @@ test_ipsec_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused) } static int -test_ipsec_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused) +test_ipsec_proto_ipv4_set_dscp_1_inner_0(void) { struct ipsec_test_flags flags; @@ -10600,7 +10986,7 @@ test_ipsec_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused) } static int -test_ipsec_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused) +test_ipsec_proto_ipv6_copy_dscp_inner_0(void) { struct ipsec_test_flags flags; @@ -10614,7 +11000,7 @@ test_ipsec_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused) } static int -test_ipsec_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused) +test_ipsec_proto_ipv6_copy_dscp_inner_1(void) { struct ipsec_test_flags flags; @@ -10628,7 +11014,7 @@ test_ipsec_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused) } static int -test_ipsec_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused) +test_ipsec_proto_ipv6_set_dscp_0_inner_1(void) { struct ipsec_test_flags flags; @@ -10646,7 +11032,7 @@ test_ipsec_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused) } static int -test_ipsec_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused) +test_ipsec_proto_ipv6_set_dscp_1_inner_0(void) { struct ipsec_test_flags flags; @@ -10664,7 +11050,7 @@ test_ipsec_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused) } static int -test_ipsec_proto_sgl(const void *data __rte_unused) +test_ipsec_proto_sgl(void) { struct crypto_testsuite_params *ts_params = &testsuite_params; struct rte_cryptodev_info dev_info; @@ -10683,6 +11069,27 @@ test_ipsec_proto_sgl(const void *data __rte_unused) return test_ipsec_proto_all(&flags); } +static int +test_ipsec_proto_sgl_ext_mbuf(void) +{ + struct crypto_testsuite_params *ts_params = &testsuite_params; + struct rte_cryptodev_info dev_info; + + struct ipsec_test_flags flags = { + .nb_segs_in_mbuf = 5, + .use_ext_mbuf = 1 + }; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) { + printf("Device doesn't support in-place scatter-gather. 
" + "Test Skipped.\n"); + return TEST_SKIPPED; + } + + return test_ipsec_proto_all(&flags); +} + static int test_ipsec_pkt_replay(const void *test_data, const uint64_t esn[], bool replayed_pkt[], uint32_t nb_pkts, bool esn_en, @@ -10872,7 +11279,7 @@ test_PDCP_PROTO_all(void) } static int -test_ipsec_proto_ipv4_ttl_decrement(const void *data __rte_unused) +test_ipsec_proto_ipv4_ttl_decrement(void) { struct ipsec_test_flags flags = { .dec_ttl_or_hop_limit = true @@ -10882,7 +11289,7 @@ test_ipsec_proto_ipv4_ttl_decrement(const void *data __rte_unused) } static int -test_ipsec_proto_ipv6_hop_limit_decrement(const void *data __rte_unused) +test_ipsec_proto_ipv6_hop_limit_decrement(void) { struct ipsec_test_flags flags = { .ipv6 = true, @@ -10905,9 +11312,7 @@ test_docsis_proto_uplink(const void *data) uint32_t crc_data_len; int ret = TEST_SUCCESS; - struct rte_security_ctx *ctx = (struct rte_security_ctx *) - rte_cryptodev_get_sec_ctx( - ts_params->valid_devs[0]); + void *ctx = rte_cryptodev_get_sec_ctx(ts_params->valid_devs[0]); /* Verify the capabilities */ struct rte_security_capability_idx sec_cap_idx; @@ -11089,9 +11494,7 @@ test_docsis_proto_downlink(const void *data) int32_t cipher_len, crc_len; int ret = TEST_SUCCESS; - struct rte_security_ctx *ctx = (struct rte_security_ctx *) - rte_cryptodev_get_sec_ctx( - ts_params->valid_devs[0]); + void *ctx = rte_cryptodev_get_sec_ctx(ts_params->valid_devs[0]); /* Verify the capabilities */ struct rte_security_capability_idx sec_cap_idx; @@ -13538,7 +13941,7 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata) retval = create_gmac_session(ts_params->valid_devs[0], tdata, RTE_CRYPTO_AUTH_OP_GENERATE); - if (retval == -ENOTSUP) + if (retval == TEST_SKIPPED) return TEST_SKIPPED; if (retval < 0) return retval; @@ -13671,7 +14074,7 @@ test_AES_GMAC_authentication_verify(const struct gmac_test_data *tdata) retval = create_gmac_session(ts_params->valid_devs[0], tdata, RTE_CRYPTO_AUTH_OP_VERIFY); - if (retval == -ENOTSUP) + if (retval == TEST_SKIPPED) return TEST_SKIPPED; if (retval < 0) return retval; @@ -13802,7 +14205,7 @@ test_AES_GMAC_authentication_SGL(const struct gmac_test_data *tdata, retval = create_gmac_session(ts_params->valid_devs[0], tdata, RTE_CRYPTO_AUTH_OP_GENERATE); - if (retval == -ENOTSUP) + if (retval == TEST_SKIPPED) return TEST_SKIPPED; if (retval < 0) return retval; @@ -14419,7 +14822,7 @@ test_authentication_verify_fail_when_data_corruption( reference, RTE_CRYPTO_AUTH_OP_VERIFY); - if (retval == -ENOTSUP) + if (retval == TEST_SKIPPED) return TEST_SKIPPED; if (retval < 0) return retval; @@ -14508,6 +14911,8 @@ test_authentication_verify_GMAC_fail_when_corruption( reference, RTE_CRYPTO_AUTH_OP_VERIFY, RTE_CRYPTO_CIPHER_OP_DECRYPT); + if (retval == TEST_SKIPPED) + return TEST_SKIPPED; if (retval < 0) return retval; @@ -14600,8 +15005,7 @@ test_authenticated_decryption_fail_when_corruption( reference, RTE_CRYPTO_AUTH_OP_VERIFY, RTE_CRYPTO_CIPHER_OP_DECRYPT); - - if (retval == -ENOTSUP) + if (retval == TEST_SKIPPED) return TEST_SKIPPED; if (retval < 0) return retval; @@ -15755,6 +16159,10 @@ static struct unit_test_suite ipsec_proto_testsuite = { "Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)", ut_setup_security, ut_teardown, test_ipsec_proto_known_vec, &pkt_aes_128_gcm), + TEST_CASE_NAMED_WITH_DATA( + "Outbound known vector ext_mbuf mode (ESP tunnel mode IPv4 AES-GCM 128)", + ut_setup_security, ut_teardown, + test_ipsec_proto_known_vec_ext_mbuf, &pkt_aes_128_gcm), TEST_CASE_NAMED_WITH_DATA( "Outbound known 
vector (ESP tunnel mode IPv4 AES-GCM 192)", ut_setup_security, ut_teardown, @@ -16137,6 +16545,14 @@ static struct unit_test_suite ipsec_proto_testsuite = { "Multi-segmented mode", ut_setup_security, ut_teardown, test_ipsec_proto_sgl), + TEST_CASE_NAMED_ST( + "Multi-segmented external mbuf mode", + ut_setup_security, ut_teardown, + test_ipsec_proto_sgl_ext_mbuf), + TEST_CASE_NAMED_WITH_DATA( + "Inbound known vector (ESP tunnel mode IPv4 AES-GCM 128) Rx inject", + ut_setup_security_rx_inject, ut_teardown_rx_inject, + test_ipsec_proto_known_vec_inb_rx_inject, &pkt_aes_128_gcm), TEST_CASES_END() /**< NULL terminate unit test array */ } }; @@ -17391,6 +17807,12 @@ test_cryptodev_scheduler(void) &scheduler_config, &end_testsuite }; + struct unit_test_suite *sched_mode_static_suites[] = { +#ifdef RTE_LIB_SECURITY + &docsis_proto_testsuite, +#endif + &end_testsuite + }; static struct unit_test_suite ts = { .suite_name = "Scheduler Unit Test Suite", .setup = scheduler_testsuite_setup, @@ -17416,9 +17838,13 @@ test_cryptodev_scheduler(void) uint8_t blk_i = 0; sched_mode_suites[sched_i]->unit_test_suites = malloc(sizeof (struct unit_test_suite *) * - (RTE_DIM(blk_suites) + 1)); + (RTE_DIM(blk_suites) + + RTE_DIM(sched_mode_static_suites) + 1)); ADD_BLOCKCIPHER_TESTSUITE(blk_i, (*sched_mode_suites[sched_i]), blk_suites, RTE_DIM(blk_suites)); + ADD_STATIC_TESTSUITE(blk_i, (*sched_mode_suites[sched_i]), + sched_mode_static_suites, + RTE_DIM(sched_mode_static_suites)); sched_mode_suites[sched_i]->unit_test_suites[blk_i] = &end_testsuite; } @@ -17439,7 +17865,7 @@ test_cryptodev_scheduler(void) return ret; } -REGISTER_TEST_COMMAND(cryptodev_scheduler_autotest, test_cryptodev_scheduler); +REGISTER_DRIVER_TEST(cryptodev_scheduler_autotest, test_cryptodev_scheduler); #endif @@ -17519,6 +17945,12 @@ test_cryptodev_cn10k(void) return run_cryptodev_testsuite(RTE_STR(CRYPTODEV_NAME_CN10K_PMD)); } +static int +test_cryptodev_cn10k_raw_api(void) +{ + return run_cryptodev_raw_testsuite(RTE_STR(CRYPTODEV_NAME_CN10K_PMD)); +} + static int test_cryptodev_dpaa2_sec_raw_api(void) { @@ -17531,37 +17963,39 @@ test_cryptodev_dpaa_sec_raw_api(void) return run_cryptodev_raw_testsuite(RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD)); } +REGISTER_TEST_COMMAND(cryptodev_cn10k_raw_api_autotest, + test_cryptodev_cn10k_raw_api); REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_raw_api_autotest, test_cryptodev_dpaa2_sec_raw_api); REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_raw_api_autotest, test_cryptodev_dpaa_sec_raw_api); -REGISTER_TEST_COMMAND(cryptodev_qat_raw_api_autotest, +REGISTER_DRIVER_TEST(cryptodev_qat_raw_api_autotest, test_cryptodev_qat_raw_api); -REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat); -REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb); -REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_mb_autotest, +REGISTER_DRIVER_TEST(cryptodev_qat_autotest, test_cryptodev_qat); +REGISTER_DRIVER_TEST(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb); +REGISTER_DRIVER_TEST(cryptodev_cpu_aesni_mb_autotest, test_cryptodev_cpu_aesni_mb); -REGISTER_TEST_COMMAND(cryptodev_chacha_poly_mb_autotest, +REGISTER_DRIVER_TEST(cryptodev_chacha_poly_mb_autotest, test_cryptodev_chacha_poly_mb); -REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl); -REGISTER_TEST_COMMAND(cryptodev_aesni_gcm_autotest, test_cryptodev_aesni_gcm); -REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_gcm_autotest, +REGISTER_DRIVER_TEST(cryptodev_openssl_autotest, test_cryptodev_openssl); 
+REGISTER_DRIVER_TEST(cryptodev_aesni_gcm_autotest, test_cryptodev_aesni_gcm); +REGISTER_DRIVER_TEST(cryptodev_cpu_aesni_gcm_autotest, test_cryptodev_cpu_aesni_gcm); REGISTER_TEST_COMMAND(cryptodev_mlx5_autotest, test_cryptodev_mlx5); -REGISTER_TEST_COMMAND(cryptodev_null_autotest, test_cryptodev_null); -REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_autotest, test_cryptodev_sw_snow3g); -REGISTER_TEST_COMMAND(cryptodev_sw_kasumi_autotest, test_cryptodev_sw_kasumi); -REGISTER_TEST_COMMAND(cryptodev_sw_zuc_autotest, test_cryptodev_sw_zuc); -REGISTER_TEST_COMMAND(cryptodev_sw_armv8_autotest, test_cryptodev_armv8); -REGISTER_TEST_COMMAND(cryptodev_sw_mvsam_autotest, test_cryptodev_mrvl); -REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_autotest, test_cryptodev_dpaa2_sec); -REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_autotest, test_cryptodev_dpaa_sec); +REGISTER_DRIVER_TEST(cryptodev_null_autotest, test_cryptodev_null); +REGISTER_DRIVER_TEST(cryptodev_sw_snow3g_autotest, test_cryptodev_sw_snow3g); +REGISTER_DRIVER_TEST(cryptodev_sw_kasumi_autotest, test_cryptodev_sw_kasumi); +REGISTER_DRIVER_TEST(cryptodev_sw_zuc_autotest, test_cryptodev_sw_zuc); +REGISTER_DRIVER_TEST(cryptodev_sw_armv8_autotest, test_cryptodev_armv8); +REGISTER_DRIVER_TEST(cryptodev_sw_mvsam_autotest, test_cryptodev_mrvl); +REGISTER_DRIVER_TEST(cryptodev_dpaa2_sec_autotest, test_cryptodev_dpaa2_sec); +REGISTER_DRIVER_TEST(cryptodev_dpaa_sec_autotest, test_cryptodev_dpaa_sec); REGISTER_TEST_COMMAND(cryptodev_ccp_autotest, test_cryptodev_ccp); -REGISTER_TEST_COMMAND(cryptodev_uadk_autotest, test_cryptodev_uadk); +REGISTER_DRIVER_TEST(cryptodev_uadk_autotest, test_cryptodev_uadk); REGISTER_TEST_COMMAND(cryptodev_virtio_autotest, test_cryptodev_virtio); REGISTER_TEST_COMMAND(cryptodev_octeontx_autotest, test_cryptodev_octeontx); REGISTER_TEST_COMMAND(cryptodev_caam_jr_autotest, test_cryptodev_caam_jr); REGISTER_TEST_COMMAND(cryptodev_nitrox_autotest, test_cryptodev_nitrox); REGISTER_TEST_COMMAND(cryptodev_bcmfs_autotest, test_cryptodev_bcmfs); -REGISTER_TEST_COMMAND(cryptodev_cn9k_autotest, test_cryptodev_cn9k); -REGISTER_TEST_COMMAND(cryptodev_cn10k_autotest, test_cryptodev_cn10k); +REGISTER_DRIVER_TEST(cryptodev_cn9k_autotest, test_cryptodev_cn9k); +REGISTER_DRIVER_TEST(cryptodev_cn10k_autotest, test_cryptodev_cn10k); diff --git a/app/test/test_cryptodev_asym.c b/app/test/test_cryptodev_asym.c index 0ef2642fdd2..94bb091df35 100644 --- a/app/test/test_cryptodev_asym.c +++ b/app/test/test_cryptodev_asym.c @@ -608,6 +608,7 @@ static inline void print_asym_capa( break; case RTE_CRYPTO_ASYM_XFORM_ECDSA: case RTE_CRYPTO_ASYM_XFORM_ECPM: + case RTE_CRYPTO_ASYM_XFORM_SM2: default: break; } @@ -1211,7 +1212,7 @@ test_mod_exp(void) } static int -test_dh_keygenration(void) +test_dh_key_generation(void) { int status; @@ -1503,6 +1504,12 @@ test_ecdsa_sign_verify(enum curve curve_id) xform.next = NULL; xform.xform_type = RTE_CRYPTO_ASYM_XFORM_ECDSA; xform.ec.curve_id = input_params.curve; + xform.ec.pkey.data = input_params.pkey.data; + xform.ec.pkey.length = input_params.pkey.length; + xform.ec.q.x.data = input_params.pubkey_qx.data; + xform.ec.q.x.length = input_params.pubkey_qx.length; + xform.ec.q.y.data = input_params.pubkey_qy.data; + xform.ec.q.y.length = input_params.pubkey_qy.length; ret = rte_cryptodev_asym_session_create(dev_id, &xform, sess_mpool, &sess); if (ret < 0) { @@ -1524,8 +1531,6 @@ test_ecdsa_sign_verify(enum curve curve_id) op->asym->ecdsa.message.length = input_params.digest.length; op->asym->ecdsa.k.data = 
input_params.scalar.data; op->asym->ecdsa.k.length = input_params.scalar.length; - op->asym->ecdsa.pkey.data = input_params.pkey.data; - op->asym->ecdsa.pkey.length = input_params.pkey.length; /* Init out buf */ op->asym->ecdsa.r.data = output_buf_r; @@ -1582,10 +1587,6 @@ test_ecdsa_sign_verify(enum curve curve_id) /* Populate op with operational details */ op->asym->ecdsa.op_type = RTE_CRYPTO_ASYM_OP_VERIFY; - op->asym->ecdsa.q.x.data = input_params.pubkey_qx.data; - op->asym->ecdsa.q.x.length = input_params.pubkey_qx.length; - op->asym->ecdsa.q.y.data = input_params.pubkey_qy.data; - op->asym->ecdsa.q.y.length = input_params.pubkey_qx.length; op->asym->ecdsa.r.data = asym_op->ecdsa.r.data; op->asym->ecdsa.r.length = asym_op->ecdsa.r.length; op->asym->ecdsa.s.data = asym_op->ecdsa.s.data; @@ -1806,12 +1807,14 @@ test_ecpm_all_curve(void) } static int -_test_sm2_sign(bool rnd_secret) +test_sm2_sign(void) { struct crypto_testsuite_params_asym *ts_params = &testsuite_params; struct crypto_testsuite_sm2_params input_params = sm2_param_fp256; + const struct rte_cryptodev_asymmetric_xform_capability *capa; struct rte_mempool *sess_mpool = ts_params->session_mpool; struct rte_mempool *op_mpool = ts_params->op_mpool; + struct rte_cryptodev_asym_capability_idx idx; uint8_t dev_id = ts_params->valid_devs[0]; struct rte_crypto_op *result_op = NULL; uint8_t output_buf_r[TEST_DATA_SIZE]; @@ -1822,6 +1825,12 @@ _test_sm2_sign(bool rnd_secret) int ret, status = TEST_SUCCESS; void *sess = NULL; + /* Check SM2 capability */ + idx.type = RTE_CRYPTO_ASYM_XFORM_SM2; + capa = rte_cryptodev_asym_capability_get(dev_id, &idx); + if (capa == NULL) + return -ENOTSUP; + /* Setup crypto op data structure */ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC); if (op == NULL) { @@ -1838,7 +1847,13 @@ _test_sm2_sign(bool rnd_secret) /* Setup asym xform */ xform.next = NULL; xform.xform_type = RTE_CRYPTO_ASYM_XFORM_SM2; - xform.sm2.hash = RTE_CRYPTO_AUTH_SM3; + xform.ec.curve_id = input_params.curve; + xform.ec.pkey.data = input_params.pkey.data; + xform.ec.pkey.length = input_params.pkey.length; + xform.ec.q.x.data = input_params.pubkey_qx.data; + xform.ec.q.x.length = input_params.pubkey_qx.length; + xform.ec.q.y.data = input_params.pubkey_qy.data; + xform.ec.q.y.length = input_params.pubkey_qy.length; ret = rte_cryptodev_asym_session_create(dev_id, &xform, sess_mpool, &sess); if (ret < 0) { @@ -1856,17 +1871,24 @@ _test_sm2_sign(bool rnd_secret) /* Populate op with operational details */ asym_op->sm2.op_type = RTE_CRYPTO_ASYM_OP_SIGN; - asym_op->sm2.message.data = input_params.message.data; - asym_op->sm2.message.length = input_params.message.length; - asym_op->sm2.pkey.data = input_params.pkey.data; - asym_op->sm2.pkey.length = input_params.pkey.length; - asym_op->sm2.q.x.data = input_params.pubkey_qx.data; - asym_op->sm2.q.x.length = input_params.pubkey_qx.length; - asym_op->sm2.q.y.data = input_params.pubkey_qy.data; - asym_op->sm2.q.y.length = input_params.pubkey_qy.length; - asym_op->sm2.id.data = input_params.id.data; - asym_op->sm2.id.length = input_params.id.length; - if (rnd_secret) { + if (rte_cryptodev_asym_xform_capability_check_hash(capa, RTE_CRYPTO_AUTH_SM3)) + asym_op->sm2.hash = RTE_CRYPTO_AUTH_SM3; + else + asym_op->sm2.hash = RTE_CRYPTO_AUTH_NULL; + + if (asym_op->sm2.hash == RTE_CRYPTO_AUTH_SM3) { + asym_op->sm2.message.data = input_params.message.data; + asym_op->sm2.message.length = input_params.message.length; + asym_op->sm2.id.data = input_params.id.data; + 
asym_op->sm2.id.length = input_params.id.length; + } else { + asym_op->sm2.message.data = input_params.digest.data; + asym_op->sm2.message.length = input_params.digest.length; + asym_op->sm2.id.data = NULL; + asym_op->sm2.id.length = 0; + } + + if (capa->internal_rng != 0) { asym_op->sm2.k.data = NULL; asym_op->sm2.k.length = 0; } else { @@ -1915,7 +1937,7 @@ _test_sm2_sign(bool rnd_secret) debug_hexdump(stdout, "s:", asym_op->sm2.s.data, asym_op->sm2.s.length); - if (!rnd_secret) { + if (capa->internal_rng == 0) { /* Verify sign (by comparison). */ if (memcmp(input_params.sign_r.data, asym_op->sm2.r.data, asym_op->sm2.r.length) != 0) { @@ -1976,25 +1998,15 @@ _test_sm2_sign(bool rnd_secret) return status; }; -static int -test_sm2_sign_rnd_secret(void) -{ - return _test_sm2_sign(true); -} - -__rte_used static int -test_sm2_sign_plain_secret(void) -{ - return _test_sm2_sign(false); -} - static int test_sm2_verify(void) { struct crypto_testsuite_params_asym *ts_params = &testsuite_params; struct crypto_testsuite_sm2_params input_params = sm2_param_fp256; + const struct rte_cryptodev_asymmetric_xform_capability *capa; struct rte_mempool *sess_mpool = ts_params->session_mpool; struct rte_mempool *op_mpool = ts_params->op_mpool; + struct rte_cryptodev_asym_capability_idx idx; uint8_t dev_id = ts_params->valid_devs[0]; struct rte_crypto_op *result_op = NULL; struct rte_crypto_asym_xform xform; @@ -2003,6 +2015,12 @@ test_sm2_verify(void) int ret, status = TEST_SUCCESS; void *sess = NULL; + /* Check SM2 capability */ + idx.type = RTE_CRYPTO_ASYM_XFORM_SM2; + capa = rte_cryptodev_asym_capability_get(dev_id, &idx); + if (capa == NULL) + return -ENOTSUP; + /* Setup crypto op data structure */ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC); if (op == NULL) { @@ -2019,7 +2037,13 @@ test_sm2_verify(void) /* Setup asym xform */ xform.next = NULL; xform.xform_type = RTE_CRYPTO_ASYM_XFORM_SM2; - xform.sm2.hash = RTE_CRYPTO_AUTH_SM3; + xform.ec.curve_id = input_params.curve; + xform.ec.pkey.data = input_params.pkey.data; + xform.ec.pkey.length = input_params.pkey.length; + xform.ec.q.x.data = input_params.pubkey_qx.data; + xform.ec.q.x.length = input_params.pubkey_qx.length; + xform.ec.q.y.data = input_params.pubkey_qy.data; + xform.ec.q.y.length = input_params.pubkey_qy.length; ret = rte_cryptodev_asym_session_create(dev_id, &xform, sess_mpool, &sess); if (ret < 0) { @@ -2037,20 +2061,28 @@ test_sm2_verify(void) /* Populate op with operational details */ asym_op->sm2.op_type = RTE_CRYPTO_ASYM_OP_VERIFY; - asym_op->sm2.message.data = input_params.message.data; - asym_op->sm2.message.length = input_params.message.length; - asym_op->sm2.pkey.data = input_params.pkey.data; - asym_op->sm2.pkey.length = input_params.pkey.length; - asym_op->sm2.q.x.data = input_params.pubkey_qx.data; - asym_op->sm2.q.x.length = input_params.pubkey_qx.length; - asym_op->sm2.q.y.data = input_params.pubkey_qy.data; - asym_op->sm2.q.y.length = input_params.pubkey_qy.length; + + if (rte_cryptodev_asym_xform_capability_check_hash(capa, RTE_CRYPTO_AUTH_SM3)) + asym_op->sm2.hash = RTE_CRYPTO_AUTH_SM3; + else + asym_op->sm2.hash = RTE_CRYPTO_AUTH_NULL; + + if (asym_op->sm2.hash == RTE_CRYPTO_AUTH_SM3) { + asym_op->sm2.message.data = input_params.message.data; + asym_op->sm2.message.length = input_params.message.length; + asym_op->sm2.id.data = input_params.id.data; + asym_op->sm2.id.length = input_params.id.length; + } else { + asym_op->sm2.message.data = input_params.digest.data; + asym_op->sm2.message.length = 
input_params.digest.length; + asym_op->sm2.id.data = NULL; + asym_op->sm2.id.length = 0; + } + asym_op->sm2.r.data = input_params.sign_r.data; asym_op->sm2.r.length = input_params.sign_r.length; asym_op->sm2.s.data = input_params.sign_s.data; asym_op->sm2.s.length = input_params.sign_s.length; - asym_op->sm2.id.data = input_params.id.data; - asym_op->sm2.id.length = input_params.id.length; RTE_LOG(DEBUG, USER1, "Process ASYM operation\n"); @@ -2090,13 +2122,15 @@ test_sm2_verify(void) }; static int -_test_sm2_enc(bool rnd_secret) +test_sm2_enc(void) { struct crypto_testsuite_params_asym *ts_params = &testsuite_params; struct crypto_testsuite_sm2_params input_params = sm2_param_fp256; + const struct rte_cryptodev_asymmetric_xform_capability *capa; struct rte_mempool *sess_mpool = ts_params->session_mpool; struct rte_mempool *op_mpool = ts_params->op_mpool; uint8_t output_buf[TEST_DATA_SIZE], *pbuf = NULL; + struct rte_cryptodev_asym_capability_idx idx; uint8_t dev_id = ts_params->valid_devs[0]; struct rte_crypto_op *result_op = NULL; struct rte_crypto_asym_xform xform; @@ -2105,6 +2139,12 @@ _test_sm2_enc(bool rnd_secret) int ret, status = TEST_SUCCESS; void *sess = NULL; + /* Check SM2 capability */ + idx.type = RTE_CRYPTO_ASYM_XFORM_SM2; + capa = rte_cryptodev_asym_capability_get(dev_id, &idx); + if (capa == NULL) + return -ENOTSUP; + /* Setup crypto op data structure */ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC); if (op == NULL) { @@ -2120,7 +2160,13 @@ _test_sm2_enc(bool rnd_secret) /* Setup asym xform */ xform.next = NULL; xform.xform_type = RTE_CRYPTO_ASYM_XFORM_SM2; - xform.sm2.hash = RTE_CRYPTO_AUTH_SM3; + xform.ec.curve_id = input_params.curve; + xform.ec.pkey.data = input_params.pkey.data; + xform.ec.pkey.length = input_params.pkey.length; + xform.ec.q.x.data = input_params.pubkey_qx.data; + xform.ec.q.x.length = input_params.pubkey_qx.length; + xform.ec.q.y.data = input_params.pubkey_qy.data; + xform.ec.q.y.length = input_params.pubkey_qy.length; ret = rte_cryptodev_asym_session_create(dev_id, &xform, sess_mpool, &sess); if (ret < 0) { @@ -2138,15 +2184,15 @@ _test_sm2_enc(bool rnd_secret) /* Populate op with operational details */ asym_op->sm2.op_type = RTE_CRYPTO_ASYM_OP_ENCRYPT; + if (rte_cryptodev_asym_xform_capability_check_hash(capa, RTE_CRYPTO_AUTH_SM3)) + asym_op->sm2.hash = RTE_CRYPTO_AUTH_SM3; + else + asym_op->sm2.hash = RTE_CRYPTO_AUTH_NULL; + asym_op->sm2.message.data = input_params.message.data; asym_op->sm2.message.length = input_params.message.length; - asym_op->sm2.pkey.data = input_params.pkey.data; - asym_op->sm2.pkey.length = input_params.pkey.length; - asym_op->sm2.q.x.data = input_params.pubkey_qx.data; - asym_op->sm2.q.x.length = input_params.pubkey_qx.length; - asym_op->sm2.q.y.data = input_params.pubkey_qy.data; - asym_op->sm2.q.y.length = input_params.pubkey_qy.length; - if (rnd_secret) { + + if (capa->internal_rng != 0) { asym_op->sm2.k.data = NULL; asym_op->sm2.k.length = 0; } else { @@ -2192,7 +2238,7 @@ _test_sm2_enc(bool rnd_secret) debug_hexdump(stdout, "cipher:", asym_op->sm2.cipher.data, asym_op->sm2.cipher.length); - if (!rnd_secret) { + if (capa->internal_rng == 0) { if (memcmp(input_params.cipher.data, asym_op->sm2.cipher.data, asym_op->sm2.cipher.length) != 0) { status = TEST_FAILED; @@ -2256,25 +2302,15 @@ _test_sm2_enc(bool rnd_secret) return status; }; -static int -test_sm2_enc_rnd_secret(void) -{ - return _test_sm2_enc(true); -} - -__rte_used static int -test_sm2_enc_plain_secret(void) -{ - return 
_test_sm2_enc(false); -} - static int test_sm2_dec(void) { struct crypto_testsuite_params_asym *ts_params = &testsuite_params; struct crypto_testsuite_sm2_params input_params = sm2_param_fp256; + const struct rte_cryptodev_asymmetric_xform_capability *capa; struct rte_mempool *sess_mpool = ts_params->session_mpool; struct rte_mempool *op_mpool = ts_params->op_mpool; + struct rte_cryptodev_asym_capability_idx idx; uint8_t dev_id = ts_params->valid_devs[0]; struct rte_crypto_op *result_op = NULL; uint8_t output_buf_m[TEST_DATA_SIZE]; @@ -2284,6 +2320,12 @@ test_sm2_dec(void) int ret, status = TEST_SUCCESS; void *sess = NULL; + /* Check SM2 capability */ + idx.type = RTE_CRYPTO_ASYM_XFORM_SM2; + capa = rte_cryptodev_asym_capability_get(dev_id, &idx); + if (capa == NULL) + return -ENOTSUP; + /* Setup crypto op data structure */ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC); if (op == NULL) { @@ -2299,7 +2341,13 @@ test_sm2_dec(void) /* Setup asym xform */ xform.next = NULL; xform.xform_type = RTE_CRYPTO_ASYM_XFORM_SM2; - xform.sm2.hash = RTE_CRYPTO_AUTH_SM3; + xform.ec.curve_id = input_params.curve; + xform.ec.pkey.data = input_params.pkey.data; + xform.ec.pkey.length = input_params.pkey.length; + xform.ec.q.x.data = input_params.pubkey_qx.data; + xform.ec.q.x.length = input_params.pubkey_qx.length; + xform.ec.q.y.data = input_params.pubkey_qy.data; + xform.ec.q.y.length = input_params.pubkey_qy.length; ret = rte_cryptodev_asym_session_create(dev_id, &xform, sess_mpool, &sess); if (ret < 0) { @@ -2317,14 +2365,13 @@ test_sm2_dec(void) /* Populate op with operational details */ asym_op->sm2.op_type = RTE_CRYPTO_ASYM_OP_DECRYPT; + if (rte_cryptodev_asym_xform_capability_check_hash(capa, RTE_CRYPTO_AUTH_SM3)) + asym_op->sm2.hash = RTE_CRYPTO_AUTH_SM3; + else + asym_op->sm2.hash = RTE_CRYPTO_AUTH_NULL; + asym_op->sm2.cipher.data = input_params.cipher.data; asym_op->sm2.cipher.length = input_params.cipher.length; - asym_op->sm2.pkey.data = input_params.pkey.data; - asym_op->sm2.pkey.length = input_params.pkey.length; - asym_op->sm2.q.x.data = input_params.pubkey_qx.data; - asym_op->sm2.q.x.length = input_params.pubkey_qx.length; - asym_op->sm2.q.y.data = input_params.pubkey_qy.data; - asym_op->sm2.q.y.length = input_params.pubkey_qy.length; /* Init out buf */ asym_op->sm2.message.data = output_buf_m; @@ -2684,10 +2731,10 @@ static struct unit_test_suite cryptodev_openssl_asym_testsuite = { TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_capability), TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_dsa), TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, - test_dh_keygenration), - TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_sm2_sign_rnd_secret), + test_dh_key_generation), + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_sm2_sign), TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_sm2_verify), - TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_sm2_enc_rnd_secret), + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_sm2_enc), TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_sm2_dec), TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_rsa_enc_dec), TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, @@ -2751,6 +2798,8 @@ static struct unit_test_suite cryptodev_octeontx_asym_testsuite = { TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_mod_exp), TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_ecdsa_sign_verify_all_curve), + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_sm2_sign), + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_sm2_verify), 
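Note on the SM2 rework above (this paragraph and sketch are not part of the patch): the sign/verify/encrypt/decrypt cases stop using separate rnd_secret/plain_secret variants and instead query the PMD's SM2 capability, pick on-device SM3 hashing when available (falling back to a precomputed digest otherwise), and only memcmp results against the known-answer vectors when the device has no internal RNG. A minimal sketch of that pattern, with a hypothetical helper name and assuming the test file's existing includes and the sm2_param_fp256 vectors:

static int
sm2_apply_device_caps(uint8_t dev_id, struct rte_crypto_asym_op *asym_op,
		const struct crypto_testsuite_sm2_params *vec,
		const struct rte_cryptodev_asymmetric_xform_capability **capa)
{
	struct rte_cryptodev_asym_capability_idx idx = {
		.type = RTE_CRYPTO_ASYM_XFORM_SM2,
	};

	/* No SM2 capability at all: the caller skips the case. */
	*capa = rte_cryptodev_asym_capability_get(dev_id, &idx);
	if (*capa == NULL)
		return -ENOTSUP;

	if (rte_cryptodev_asym_xform_capability_check_hash(*capa, RTE_CRYPTO_AUTH_SM3)) {
		/* The device hashes the plain message and user id itself. */
		asym_op->sm2.hash = RTE_CRYPTO_AUTH_SM3;
		asym_op->sm2.message.data = vec->message.data;
		asym_op->sm2.message.length = vec->message.length;
		asym_op->sm2.id.data = vec->id.data;
		asym_op->sm2.id.length = vec->id.length;
	} else {
		/* Otherwise hand it the precomputed SM3 digest, no user id. */
		asym_op->sm2.hash = RTE_CRYPTO_AUTH_NULL;
		asym_op->sm2.message.data = vec->digest.data;
		asym_op->sm2.message.length = vec->digest.length;
		asym_op->sm2.id.data = NULL;
		asym_op->sm2.id.length = 0;
	}

	/*
	 * A device with an internal RNG picks the per-op scalar k itself, so
	 * k stays NULL and the result can only be checked by a verify or
	 * decrypt round-trip, not by memcmp against the r/s or ciphertext
	 * vectors (the known-k path is shown in the hunks above).
	 */
	if ((*capa)->internal_rng != 0) {
		asym_op->sm2.k.data = NULL;
		asym_op->sm2.k.length = 0;
	}

	return 0;
}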
TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_ecpm_all_curve), TEST_CASES_END() /**< NULL terminate unit test array */ @@ -2825,10 +2874,9 @@ test_cryptodev_cn10k_asym(void) return unit_test_suite_runner(&cryptodev_octeontx_asym_testsuite); } -REGISTER_TEST_COMMAND(cryptodev_openssl_asym_autotest, - test_cryptodev_openssl_asym); +REGISTER_DRIVER_TEST(cryptodev_openssl_asym_autotest, test_cryptodev_openssl_asym); -REGISTER_TEST_COMMAND(cryptodev_qat_asym_autotest, test_cryptodev_qat_asym); +REGISTER_DRIVER_TEST(cryptodev_qat_asym_autotest, test_cryptodev_qat_asym); REGISTER_TEST_COMMAND(cryptodev_octeontx_asym_autotest, test_cryptodev_octeontx_asym); diff --git a/app/test/test_cryptodev_mixed_test_vectors.h b/app/test/test_cryptodev_mixed_test_vectors.h index 161e2d905fb..9c4313185eb 100644 --- a/app/test/test_cryptodev_mixed_test_vectors.h +++ b/app/test/test_cryptodev_mixed_test_vectors.h @@ -478,8 +478,10 @@ struct mixed_cipher_auth_test_data auth_aes_cmac_cipher_snow_test_case_1 = { }, .cipher_iv = { .data = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, - .len = 0, + .len = 16, }, .cipher = { .len_bits = 516 << 3, @@ -917,8 +919,10 @@ struct mixed_cipher_auth_test_data auth_aes_cmac_cipher_zuc_test_case_1 = { }, .cipher_iv = { .data = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, - .len = 0, + .len = 16, }, .cipher = { .len_bits = 516 << 3, diff --git a/app/test/test_cryptodev_security_ipsec.c b/app/test/test_cryptodev_security_ipsec.c index 7a8688c692d..205714b2704 100644 --- a/app/test/test_cryptodev_security_ipsec.c +++ b/app/test/test_cryptodev_security_ipsec.c @@ -213,6 +213,14 @@ test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform, } } + if (ipsec_xform->options.ingress_oop == 1 && + sec_cap->ipsec.options.ingress_oop == 0) { + if (!silent) + RTE_LOG(INFO, USER1, + "Inline Ingress OOP processing is not supported\n"); + return -ENOTSUP; + } + return 0; } @@ -1241,7 +1249,7 @@ test_ipsec_status_check(const struct ipsec_test_data *td, } int -test_ipsec_stats_verify(struct rte_security_ctx *ctx, +test_ipsec_stats_verify(void *ctx, void *sess, const struct ipsec_test_flags *flags, enum rte_security_ipsec_sa_direction dir) diff --git a/app/test/test_cryptodev_security_ipsec.h b/app/test/test_cryptodev_security_ipsec.h index 92e641ba0b1..d7fc5627516 100644 --- a/app/test/test_cryptodev_security_ipsec.h +++ b/app/test/test_cryptodev_security_ipsec.h @@ -103,6 +103,7 @@ struct ipsec_test_flags { bool fragment; bool stats_success; bool antireplay; + bool use_ext_mbuf; enum df_flags df; enum dscp_flags dscp; enum flabel_flags flabel; @@ -110,6 +111,8 @@ struct ipsec_test_flags { bool ah; uint32_t plaintext_len; int nb_segs_in_mbuf; + bool inb_oop; + bool rx_inject; }; struct crypto_param { @@ -300,7 +303,7 @@ int test_ipsec_status_check(const struct ipsec_test_data *td, enum rte_security_ipsec_sa_direction dir, int pkt_num); -int test_ipsec_stats_verify(struct rte_security_ctx *ctx, +int test_ipsec_stats_verify(void *ctx, void *sess, const struct ipsec_test_flags *flags, enum rte_security_ipsec_sa_direction dir); diff --git a/app/test/test_cryptodev_sm2_test_vectors.h b/app/test/test_cryptodev_sm2_test_vectors.h index 7a4ce70c10f..41f5f7074a1 100644 --- a/app/test/test_cryptodev_sm2_test_vectors.h +++ b/app/test/test_cryptodev_sm2_test_vectors.h @@ -17,6 +17,8 @@ struct crypto_testsuite_sm2_params { rte_crypto_param id; rte_crypto_param cipher; 
rte_crypto_param message; + rte_crypto_param digest; + int curve; }; static uint8_t fp256_pkey[] = { @@ -45,17 +47,17 @@ static uint8_t fp256_k[] = { }; static uint8_t fp256_sign_r[] = { - 0xf3, 0x26, 0x10, 0xde, 0xfb, 0xbf, 0x13, 0xd4, - 0x73, 0xb1, 0xc2, 0x80, 0x51, 0x06, 0x29, 0xf9, - 0xfb, 0xc8, 0x11, 0xa7, 0x8d, 0x2c, 0xcb, 0x09, - 0x7c, 0xb2, 0xcf, 0x58, 0x0b, 0x5e, 0x25, 0xff + 0x75, 0x2B, 0x8C, 0x15, 0x38, 0x10, 0xF6, 0xC0, + 0x28, 0xC9, 0x8A, 0x51, 0xD0, 0x62, 0x69, 0x4B, + 0xF6, 0x58, 0x06, 0xEB, 0xF1, 0x91, 0x1F, 0x15, + 0x8B, 0x08, 0x09, 0xF9, 0x88, 0x0A, 0x44, 0x24 }; static uint8_t fp256_sign_s[] = { - 0x8d, 0x8d, 0xb5, 0x40, 0xe3, 0xfb, 0x98, 0xf9, - 0x8c, 0xe4, 0x58, 0x60, 0xf2, 0x78, 0x8f, 0xd9, - 0xbf, 0xb8, 0x47, 0x73, 0x88, 0xc1, 0xd1, 0xcd, - 0x2d, 0xdb, 0xe3, 0xc1, 0x44, 0x30, 0x25, 0x86 + 0x5A, 0x3C, 0x96, 0x3E, 0x1C, 0xB4, 0x19, 0xF9, + 0xD7, 0x78, 0xB8, 0xCE, 0xFF, 0x9D, 0xB1, 0x31, + 0x77, 0xDB, 0xA0, 0xFE, 0x84, 0x61, 0x1A, 0xD9, + 0x4E, 0xFF, 0x82, 0x13, 0x1C, 0xCA, 0x04, 0x75, }; static uint8_t fp256_id[] = { @@ -67,6 +69,13 @@ static uint8_t fp256_message[] = { 0x64, 0x69, 0x67, 0x65, 0x73, 0x74 }; +static uint8_t fp256_digest[] = { + 0x0F, 0xB5, 0xCE, 0xF3, 0x3C, 0xB7, 0xD1, 0x35, + 0xA9, 0x3A, 0xC7, 0xA7, 0x89, 0x2A, 0x6D, 0x9A, + 0xF3, 0x1E, 0xC5, 0x38, 0xD3, 0x65, 0x1B, 0xB9, + 0xDF, 0x5F, 0x7F, 0x4A, 0xD8, 0x89, 0x57, 0xF1 +}; + static uint8_t fp256_cipher[] = { 0x30, 0x78, 0x02, 0x21, 0x00, 0xAB, 0xBD, 0xE8, 0xE8, 0x80, 0x93, 0x36, 0x77, 0xB6, 0x44, 0x47, @@ -120,10 +129,15 @@ struct crypto_testsuite_sm2_params sm2_param_fp256 = { .data = fp256_message, .length = sizeof(fp256_message), }, + .digest = { + .data = fp256_digest, + .length = sizeof(fp256_digest), + }, .cipher = { .data = fp256_cipher, .length = sizeof(fp256_cipher), - } + }, + .curve = RTE_CRYPTO_EC_GROUP_SM2 }; #endif /* __TEST_CRYPTODEV_SM2_TEST_VECTORS_H__ */ diff --git a/app/test/test_cycles.c b/app/test/test_cycles.c index 66d11e6db89..a7654de1763 100644 --- a/app/test/test_cycles.c +++ b/app/test/test_cycles.c @@ -53,4 +53,4 @@ test_user_delay_us(void) return 0; } -REGISTER_TEST_COMMAND(user_delay_us, test_user_delay_us); +REGISTER_FAST_TEST(user_delay_us, true, true, test_user_delay_us); diff --git a/app/test/test_debug.c b/app/test/test_debug.c index 2704f5b9272..8ad6d40fcb0 100644 --- a/app/test/test_debug.c +++ b/app/test/test_debug.c @@ -140,4 +140,4 @@ test_debug(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(debug_autotest, test_debug); +REGISTER_FAST_TEST(debug_autotest, true, true, test_debug); diff --git a/app/test/test_devargs.c b/app/test/test_devargs.c index 0a4c34a1adc..f977d44448f 100644 --- a/app/test/test_devargs.c +++ b/app/test/test_devargs.c @@ -213,4 +213,4 @@ test_devargs(void) return 0; } -REGISTER_TEST_COMMAND(devargs_autotest, test_devargs); +REGISTER_FAST_TEST(devargs_autotest, true, true, test_devargs); diff --git a/app/test/test_dispatcher.c b/app/test/test_dispatcher.c new file mode 100644 index 00000000000..6eb3f572cfc --- /dev/null +++ b/app/test/test_dispatcher.c @@ -0,0 +1,1056 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Ericsson AB + */ + +#include +#include +#include +#include +#include +#include + +#include "test.h" + +#define NUM_WORKERS 3 +#define NUM_PORTS (NUM_WORKERS + 1) +#define WORKER_PORT_ID(worker_idx) (worker_idx) +#define DRIVER_PORT_ID (NUM_PORTS - 1) + +#define NUM_SERVICE_CORES NUM_WORKERS +#define MIN_LCORES (NUM_SERVICE_CORES + 1) + +/* Eventdev */ +#define NUM_QUEUES 8 +#define LAST_QUEUE_ID 
(NUM_QUEUES - 1) +#define MAX_EVENTS 4096 +#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2) +#define DEQUEUE_BURST_SIZE 32 +#define ENQUEUE_BURST_SIZE 32 + +#define NUM_EVENTS 10000000 +#define NUM_FLOWS 16 + +#define DSW_VDEV "event_dsw0" + +struct app_queue { + uint8_t queue_id; + uint64_t sn[NUM_FLOWS]; + int dispatcher_reg_id; +}; + +struct cb_count { + uint8_t expected_event_dev_id; + uint8_t expected_event_port_id[RTE_MAX_LCORE]; + RTE_ATOMIC(int) count; +}; + +struct test_app { + uint8_t event_dev_id; + struct rte_dispatcher *dispatcher; + uint32_t dispatcher_service_id; + + unsigned int service_lcores[NUM_SERVICE_CORES]; + + int never_match_reg_id; + uint64_t never_match_count; + struct cb_count never_process_count; + + struct app_queue queues[NUM_QUEUES]; + + int finalize_reg_id; + struct cb_count finalize_count; + + bool running; + + RTE_ATOMIC(int) completed_events; + RTE_ATOMIC(int) errors; +}; + +static struct test_app * +test_app_create(void) +{ + int i; + struct test_app *app; + + app = calloc(1, sizeof(struct test_app)); + + if (app == NULL) + return NULL; + + for (i = 0; i < NUM_QUEUES; i++) + app->queues[i].queue_id = i; + + return app; +} + +static void +test_app_free(struct test_app *app) +{ + free(app); +} + +static int +test_app_create_vdev(struct test_app *app) +{ + int rc; + + rc = rte_vdev_init(DSW_VDEV, NULL); + if (rc < 0) + return TEST_SKIPPED; + + rc = rte_event_dev_get_dev_id(DSW_VDEV); + + app->event_dev_id = (uint8_t)rc; + + return TEST_SUCCESS; +} + +static int +test_app_destroy_vdev(struct test_app *app) +{ + int rc; + + rc = rte_event_dev_close(app->event_dev_id); + TEST_ASSERT_SUCCESS(rc, "Error while closing event device"); + + rc = rte_vdev_uninit(DSW_VDEV); + TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device"); + + return TEST_SUCCESS; +} + +static int +test_app_setup_event_dev(struct test_app *app) +{ + int rc; + int i; + + rc = test_app_create_vdev(app); + if (rc != TEST_SUCCESS) + return rc; + + struct rte_event_dev_config config = { + .nb_event_queues = NUM_QUEUES, + .nb_event_ports = NUM_PORTS, + .nb_events_limit = MAX_EVENTS, + .nb_event_queue_flows = 64, + .nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE, + .nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE + }; + + rc = rte_event_dev_configure(app->event_dev_id, &config); + + TEST_ASSERT_SUCCESS(rc, "Unable to configure event device"); + + struct rte_event_queue_conf queue_config = { + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .schedule_type = RTE_SCHED_TYPE_ATOMIC, + .nb_atomic_flows = 64 + }; + + for (i = 0; i < NUM_QUEUES; i++) { + uint8_t queue_id = i; + + rc = rte_event_queue_setup(app->event_dev_id, queue_id, + &queue_config); + + TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id); + } + + struct rte_event_port_conf port_config = { + .new_event_threshold = NEW_EVENT_THRESHOLD, + .dequeue_depth = DEQUEUE_BURST_SIZE, + .enqueue_depth = ENQUEUE_BURST_SIZE + }; + + for (i = 0; i < NUM_PORTS; i++) { + uint8_t event_port_id = i; + + rc = rte_event_port_setup(app->event_dev_id, event_port_id, + &port_config); + TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d", + event_port_id); + + if (event_port_id == DRIVER_PORT_ID) + continue; + + rc = rte_event_port_link(app->event_dev_id, event_port_id, + NULL, NULL, 0); + + TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d", + event_port_id); + } + + return TEST_SUCCESS; +} + +static int +test_app_teardown_event_dev(struct test_app *app) +{ + return test_app_destroy_vdev(app); +} + +static int 
+test_app_start_event_dev(struct test_app *app) +{ + int rc; + + rc = rte_event_dev_start(app->event_dev_id); + TEST_ASSERT_SUCCESS(rc, "Unable to start event device"); + + return TEST_SUCCESS; +} + +static void +test_app_stop_event_dev(struct test_app *app) +{ + rte_event_dev_stop(app->event_dev_id); +} + +static int +test_app_create_dispatcher(struct test_app *app) +{ + int rc; + + app->dispatcher = rte_dispatcher_create(app->event_dev_id); + + TEST_ASSERT(app->dispatcher != NULL, "Unable to create event " + "dispatcher"); + + app->dispatcher_service_id = + rte_dispatcher_service_id_get(app->dispatcher); + + rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1); + + TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service " + "stats"); + + rc = rte_service_runstate_set(app->dispatcher_service_id, 1); + + TEST_ASSERT_SUCCESS(rc, "Unable to set dispatcher service runstate"); + + return TEST_SUCCESS; +} + +static int +test_app_free_dispatcher(struct test_app *app) +{ + int rc; + + rc = rte_service_runstate_set(app->dispatcher_service_id, 0); + TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service"); + + rc = rte_dispatcher_free(app->dispatcher); + TEST_ASSERT_SUCCESS(rc, "Error freeing dispatcher"); + + return TEST_SUCCESS; +} + +static int +test_app_bind_ports(struct test_app *app) +{ + int i; + + app->never_process_count.expected_event_dev_id = + app->event_dev_id; + app->finalize_count.expected_event_dev_id = + app->event_dev_id; + + for (i = 0; i < NUM_WORKERS; i++) { + unsigned int lcore_id = app->service_lcores[i]; + uint8_t port_id = WORKER_PORT_ID(i); + + int rc = rte_dispatcher_bind_port_to_lcore( + app->dispatcher, port_id, DEQUEUE_BURST_SIZE, 0, + lcore_id + ); + + TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d " + "to lcore %d", port_id, lcore_id); + + app->never_process_count.expected_event_port_id[lcore_id] = + port_id; + app->finalize_count.expected_event_port_id[lcore_id] = port_id; + } + + + return TEST_SUCCESS; +} + +static int +test_app_unbind_ports(struct test_app *app) +{ + int i; + + for (i = 0; i < NUM_WORKERS; i++) { + unsigned int lcore_id = app->service_lcores[i]; + + int rc = rte_dispatcher_unbind_port_from_lcore( + app->dispatcher, + WORKER_PORT_ID(i), + lcore_id + ); + + TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d " + "from lcore %d", WORKER_PORT_ID(i), + lcore_id); + } + + return TEST_SUCCESS; +} + +static bool +match_queue(const struct rte_event *event, void *cb_data) +{ + uintptr_t queue_id = (uintptr_t)cb_data; + + return event->queue_id == queue_id; +} + +static int +test_app_get_worker_index(struct test_app *app, unsigned int lcore_id) +{ + int i; + + for (i = 0; i < NUM_SERVICE_CORES; i++) + if (app->service_lcores[i] == lcore_id) + return i; + + return -1; +} + +static int +test_app_get_worker_port(struct test_app *app, unsigned int lcore_id) +{ + int worker; + + worker = test_app_get_worker_index(app, lcore_id); + + if (worker < 0) + return -1; + + return WORKER_PORT_ID(worker); +} + +static void +test_app_queue_note_error(struct test_app *app) +{ + rte_atomic_fetch_add_explicit(&app->errors, 1, rte_memory_order_relaxed); +} + +static void +test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id, + struct rte_event *in_events, uint16_t num, + void *cb_data) +{ + struct app_queue *app_queue = cb_data; + struct test_app *app = container_of(app_queue, struct test_app, + queues[app_queue->queue_id]); + unsigned int lcore_id = rte_lcore_id(); + bool intermediate_queue = 
app_queue->queue_id != LAST_QUEUE_ID; + int event_port_id; + uint16_t i; + struct rte_event out_events[num]; + + event_port_id = test_app_get_worker_port(app, lcore_id); + + if (event_port_id < 0 || p_event_dev_id != app->event_dev_id || + p_event_port_id != event_port_id) { + test_app_queue_note_error(app); + return; + } + + for (i = 0; i < num; i++) { + const struct rte_event *in_event = &in_events[i]; + struct rte_event *out_event = &out_events[i]; + uint64_t sn = in_event->u64; + uint64_t expected_sn; + + if (in_event->queue_id != app_queue->queue_id) { + test_app_queue_note_error(app); + return; + } + + expected_sn = app_queue->sn[in_event->flow_id]++; + + if (expected_sn != sn) { + test_app_queue_note_error(app); + return; + } + + if (intermediate_queue) + *out_event = (struct rte_event) { + .queue_id = in_event->queue_id + 1, + .flow_id = in_event->flow_id, + .sched_type = RTE_SCHED_TYPE_ATOMIC, + .op = RTE_EVENT_OP_FORWARD, + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .u64 = sn + }; + } + + if (intermediate_queue) { + uint16_t n = 0; + + do { + n += rte_event_enqueue_forward_burst(p_event_dev_id, + p_event_port_id, + out_events + n, + num - n); + } while (n != num); + } else + rte_atomic_fetch_add_explicit(&app->completed_events, num, + rte_memory_order_relaxed); +} + +static bool +never_match(const struct rte_event *event __rte_unused, void *cb_data) +{ + uint64_t *count = cb_data; + + (*count)++; + + return false; +} + +static void +test_app_never_process(uint8_t event_dev_id, uint8_t event_port_id, + struct rte_event *in_events __rte_unused, uint16_t num, void *cb_data) +{ + struct cb_count *count = cb_data; + unsigned int lcore_id = rte_lcore_id(); + + if (event_dev_id == count->expected_event_dev_id && + event_port_id == count->expected_event_port_id[lcore_id]) + rte_atomic_fetch_add_explicit(&count->count, num, + rte_memory_order_relaxed); +} + +static void +finalize(uint8_t event_dev_id, uint8_t event_port_id, void *cb_data) +{ + struct cb_count *count = cb_data; + unsigned int lcore_id = rte_lcore_id(); + + if (event_dev_id == count->expected_event_dev_id && + event_port_id == count->expected_event_port_id[lcore_id]) + rte_atomic_fetch_add_explicit(&count->count, 1, + rte_memory_order_relaxed); +} + +static int +test_app_register_callbacks(struct test_app *app) +{ + int i; + + app->never_match_reg_id = + rte_dispatcher_register(app->dispatcher, never_match, + &app->never_match_count, + test_app_never_process, + &app->never_process_count); + + TEST_ASSERT(app->never_match_reg_id >= 0, "Unable to register " + "never-match handler"); + + for (i = 0; i < NUM_QUEUES; i++) { + struct app_queue *app_queue = &app->queues[i]; + uintptr_t queue_id = app_queue->queue_id; + int reg_id; + + reg_id = rte_dispatcher_register(app->dispatcher, + match_queue, (void *)queue_id, + test_app_process_queue, + app_queue); + + TEST_ASSERT(reg_id >= 0, "Unable to register consumer " + "callback for queue %d", i); + + app_queue->dispatcher_reg_id = reg_id; + } + + app->finalize_reg_id = + rte_dispatcher_finalize_register(app->dispatcher, + finalize, + &app->finalize_count); + TEST_ASSERT_SUCCESS(app->finalize_reg_id, "Error registering " + "finalize callback"); + + return TEST_SUCCESS; +} + +static int +test_app_unregister_callback(struct test_app *app, uint8_t queue_id) +{ + int reg_id = app->queues[queue_id].dispatcher_reg_id; + int rc; + + if (reg_id < 0) /* unregistered already */ + return 0; + + rc = rte_dispatcher_unregister(app->dispatcher, reg_id); + + TEST_ASSERT_SUCCESS(rc, "Unable to 
unregister consumer " + "callback for queue %d", queue_id); + + app->queues[queue_id].dispatcher_reg_id = -1; + + return TEST_SUCCESS; +} + +static int +test_app_unregister_callbacks(struct test_app *app) +{ + int i; + int rc; + + if (app->never_match_reg_id >= 0) { + rc = rte_dispatcher_unregister(app->dispatcher, + app->never_match_reg_id); + + TEST_ASSERT_SUCCESS(rc, "Unable to unregister never-match " + "handler"); + app->never_match_reg_id = -1; + } + + for (i = 0; i < NUM_QUEUES; i++) { + rc = test_app_unregister_callback(app, i); + if (rc != TEST_SUCCESS) + return rc; + } + + if (app->finalize_reg_id >= 0) { + rc = rte_dispatcher_finalize_unregister( + app->dispatcher, app->finalize_reg_id + ); + app->finalize_reg_id = -1; + } + + return TEST_SUCCESS; +} + +static void +test_app_start_dispatcher(struct test_app *app) +{ + rte_dispatcher_start(app->dispatcher); +} + +static void +test_app_stop_dispatcher(struct test_app *app) +{ + rte_dispatcher_stop(app->dispatcher); +} + +static int +test_app_reset_dispatcher_stats(struct test_app *app) +{ + struct rte_dispatcher_stats stats; + + rte_dispatcher_stats_reset(app->dispatcher); + + memset(&stats, 0xff, sizeof(stats)); + + rte_dispatcher_stats_get(app->dispatcher, &stats); + + TEST_ASSERT_EQUAL(stats.poll_count, 0, "Poll count not zero"); + TEST_ASSERT_EQUAL(stats.ev_batch_count, 0, "Batch count not zero"); + TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0, "Dispatch count " + "not zero"); + TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count not zero"); + + return TEST_SUCCESS; +} + +static int +test_app_setup_service_core(struct test_app *app, unsigned int lcore_id) +{ + int rc; + + rc = rte_service_lcore_add(lcore_id); + TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher " + "service core", lcore_id); + + rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1); + TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service"); + + return TEST_SUCCESS; +} + +static int +test_app_setup_service_cores(struct test_app *app) +{ + int i; + int lcore_id = -1; + + for (i = 0; i < NUM_SERVICE_CORES; i++) { + lcore_id = rte_get_next_lcore(lcore_id, 1, 0); + + app->service_lcores[i] = lcore_id; + } + + for (i = 0; i < NUM_SERVICE_CORES; i++) { + int rc; + + rc = test_app_setup_service_core(app, app->service_lcores[i]); + if (rc != TEST_SUCCESS) + return rc; + } + + return TEST_SUCCESS; +} + +static int +test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id) +{ + int rc; + + rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0); + TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service"); + + rc = rte_service_lcore_del(lcore_id); + TEST_ASSERT_SUCCESS(rc, "Unable change role of service lcore %d", + lcore_id); + + return TEST_SUCCESS; +} + +static int +test_app_teardown_service_cores(struct test_app *app) +{ + int i; + + for (i = 0; i < NUM_SERVICE_CORES; i++) { + unsigned int lcore_id = app->service_lcores[i]; + int rc; + + rc = test_app_teardown_service_core(app, lcore_id); + if (rc != TEST_SUCCESS) + return rc; + } + + return TEST_SUCCESS; +} + +static int +test_app_start_service_cores(struct test_app *app) +{ + int i; + + for (i = 0; i < NUM_SERVICE_CORES; i++) { + unsigned int lcore_id = app->service_lcores[i]; + int rc; + + rc = rte_service_lcore_start(lcore_id); + TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d", + lcore_id); + } + + return TEST_SUCCESS; +} + +static int +test_app_stop_service_cores(struct test_app *app) +{ + int i; + + for (i = 0; 
i < NUM_SERVICE_CORES; i++) { + unsigned int lcore_id = app->service_lcores[i]; + int rc; + + rc = rte_service_lcore_stop(lcore_id); + TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d", + lcore_id); + } + + return TEST_SUCCESS; +} + +static int +test_app_start(struct test_app *app) +{ + int rc; + + rc = test_app_start_event_dev(app); + if (rc != TEST_SUCCESS) + return rc; + + rc = test_app_start_service_cores(app); + if (rc != TEST_SUCCESS) + return rc; + + test_app_start_dispatcher(app); + + app->running = true; + + return TEST_SUCCESS; +} + +static int +test_app_stop(struct test_app *app) +{ + int rc; + + test_app_stop_dispatcher(app); + + rc = test_app_stop_service_cores(app); + if (rc != TEST_SUCCESS) + return rc; + + test_app_stop_event_dev(app); + + app->running = false; + + return TEST_SUCCESS; +} + +struct test_app *test_app; + +static int +test_setup(void) +{ + int rc; + + if (rte_lcore_count() < MIN_LCORES) { + printf("Not enough cores for dispatcher_autotest; expecting at " + "least %d.\n", MIN_LCORES); + return TEST_SKIPPED; + } + + test_app = test_app_create(); + TEST_ASSERT(test_app != NULL, "Unable to allocate memory"); + + rc = test_app_setup_event_dev(test_app); + if (rc != TEST_SUCCESS) + goto err_free_app; + + rc = test_app_create_dispatcher(test_app); + if (rc != TEST_SUCCESS) + goto err_teardown_event_dev; + + rc = test_app_setup_service_cores(test_app); + if (rc != TEST_SUCCESS) + goto err_free_dispatcher; + + rc = test_app_register_callbacks(test_app); + if (rc != TEST_SUCCESS) + goto err_teardown_service_cores; + + rc = test_app_bind_ports(test_app); + if (rc != TEST_SUCCESS) + goto err_unregister_callbacks; + + return TEST_SUCCESS; + +err_unregister_callbacks: + test_app_unregister_callbacks(test_app); +err_teardown_service_cores: + test_app_teardown_service_cores(test_app); +err_free_dispatcher: + test_app_free_dispatcher(test_app); +err_teardown_event_dev: + test_app_teardown_event_dev(test_app); +err_free_app: + test_app_free(test_app); + + test_app = NULL; + + return rc; +} + +static void test_teardown(void) +{ + if (test_app == NULL) + return; + + if (test_app->running) + test_app_stop(test_app); + + test_app_teardown_service_cores(test_app); + + test_app_unregister_callbacks(test_app); + + test_app_unbind_ports(test_app); + + test_app_free_dispatcher(test_app); + + test_app_teardown_event_dev(test_app); + + test_app_free(test_app); + + test_app = NULL; +} + +static int +test_app_get_completed_events(struct test_app *app) +{ + return rte_atomic_load_explicit(&app->completed_events, + rte_memory_order_relaxed); +} + +static int +test_app_get_errors(struct test_app *app) +{ + return rte_atomic_load_explicit(&app->errors, rte_memory_order_relaxed); +} + +static int +test_basic(void) +{ + int rc; + int i; + + rc = test_app_start(test_app); + if (rc != TEST_SUCCESS) + return rc; + + uint64_t sns[NUM_FLOWS] = { 0 }; + + for (i = 0; i < NUM_EVENTS;) { + struct rte_event events[ENQUEUE_BURST_SIZE]; + int left; + int batch_size; + int j; + uint16_t n = 0; + + batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE); + left = NUM_EVENTS - i; + + batch_size = RTE_MIN(left, batch_size); + + for (j = 0; j < batch_size; j++) { + struct rte_event *event = &events[j]; + uint64_t sn; + uint32_t flow_id; + + flow_id = rte_rand_max(NUM_FLOWS); + + sn = sns[flow_id]++; + + *event = (struct rte_event) { + .queue_id = 0, + .flow_id = flow_id, + .sched_type = RTE_SCHED_TYPE_ATOMIC, + .op = RTE_EVENT_OP_NEW, + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .u64 = sn + }; + } + + while 
(n < batch_size) + n += rte_event_enqueue_new_burst(test_app->event_dev_id, + DRIVER_PORT_ID, + events + n, + batch_size - n); + + i += batch_size; + } + + while (test_app_get_completed_events(test_app) != NUM_EVENTS) + rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0); + + rc = test_app_get_errors(test_app); + TEST_ASSERT(rc == 0, "%d errors occurred", rc); + + rc = test_app_stop(test_app); + if (rc != TEST_SUCCESS) + return rc; + + struct rte_dispatcher_stats stats; + rte_dispatcher_stats_get(test_app->dispatcher, &stats); + + TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero"); + TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES, + "Invalid dispatch count"); + TEST_ASSERT(stats.poll_count > 0, "Poll count is zero"); + + TEST_ASSERT_EQUAL(test_app->never_process_count.count, 0, + "Never-match handler's process function has " + "been called"); + + int finalize_count = + rte_atomic_load_explicit(&test_app->finalize_count.count, + rte_memory_order_relaxed); + + TEST_ASSERT(finalize_count > 0, "Finalize count is zero"); + TEST_ASSERT(finalize_count <= (int)stats.ev_dispatch_count, + "Finalize count larger than event count"); + + TEST_ASSERT_EQUAL(finalize_count, (int)stats.ev_batch_count, + "%"PRIu64" batches dequeued, but finalize called %d " + "times", stats.ev_batch_count, finalize_count); + + /* + * The event dispatcher should call often-matching match functions + * more often, and thus this never-matching match function should + * be called relatively infrequently. + */ + TEST_ASSERT(test_app->never_match_count < + (stats.ev_dispatch_count / 4), + "Never-matching match function called suspiciously often"); + + rc = test_app_reset_dispatcher_stats(test_app); + if (rc != TEST_SUCCESS) + return rc; + + return TEST_SUCCESS; +} + +static int +test_drop(void) +{ + int rc; + uint8_t unhandled_queue; + struct rte_dispatcher_stats stats; + + unhandled_queue = (uint8_t)rte_rand_max(NUM_QUEUES); + + rc = test_app_start(test_app); + if (rc != TEST_SUCCESS) + return rc; + + rc = test_app_unregister_callback(test_app, unhandled_queue); + if (rc != TEST_SUCCESS) + return rc; + + struct rte_event event = { + .queue_id = unhandled_queue, + .flow_id = 0, + .sched_type = RTE_SCHED_TYPE_ATOMIC, + .op = RTE_EVENT_OP_NEW, + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .u64 = 0 + }; + + do { + rc = rte_event_enqueue_burst(test_app->event_dev_id, + DRIVER_PORT_ID, &event, 1); + } while (rc == 0); + + do { + rte_dispatcher_stats_get(test_app->dispatcher, &stats); + + rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0); + } while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0); + + rc = test_app_stop(test_app); + if (rc != TEST_SUCCESS) + return rc; + + TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one"); + TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0, + "Dispatch count is not zero"); + TEST_ASSERT(stats.poll_count > 0, "Poll count is zero"); + + return TEST_SUCCESS; +} + +#define MORE_THAN_MAX_HANDLERS 1000 +#define MIN_HANDLERS 32 + +static int +test_many_handler_registrations(void) +{ + int rc; + int num_regs = 0; + int reg_ids[MORE_THAN_MAX_HANDLERS]; + int reg_id; + int i; + + rc = test_app_unregister_callbacks(test_app); + if (rc != TEST_SUCCESS) + return rc; + + for (i = 0; i < MORE_THAN_MAX_HANDLERS; i++) { + reg_id = rte_dispatcher_register(test_app->dispatcher, + never_match, NULL, + test_app_never_process, NULL); + if (reg_id < 0) + break; + + reg_ids[num_regs++] = reg_id; + } + + TEST_ASSERT_EQUAL(reg_id, -ENOMEM, 
"Incorrect return code. Expected " + "%d but was %d", -ENOMEM, reg_id); + TEST_ASSERT(num_regs >= MIN_HANDLERS, "Registration failed already " + "after %d handler registrations.", num_regs); + + for (i = 0; i < num_regs; i++) { + rc = rte_dispatcher_unregister(test_app->dispatcher, + reg_ids[i]); + TEST_ASSERT_SUCCESS(rc, "Unable to unregister handler %d", + reg_ids[i]); + } + + return TEST_SUCCESS; +} + +static void +dummy_finalize(uint8_t event_dev_id __rte_unused, + uint8_t event_port_id __rte_unused, + void *cb_data __rte_unused) +{ +} + +#define MORE_THAN_MAX_FINALIZERS 1000 +#define MIN_FINALIZERS 16 + +static int +test_many_finalize_registrations(void) +{ + int rc; + int num_regs = 0; + int reg_ids[MORE_THAN_MAX_FINALIZERS]; + int reg_id; + int i; + + rc = test_app_unregister_callbacks(test_app); + if (rc != TEST_SUCCESS) + return rc; + + for (i = 0; i < MORE_THAN_MAX_FINALIZERS; i++) { + reg_id = rte_dispatcher_finalize_register( + test_app->dispatcher, dummy_finalize, NULL + ); + + if (reg_id < 0) + break; + + reg_ids[num_regs++] = reg_id; + } + + TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected " + "%d but was %d", -ENOMEM, reg_id); + TEST_ASSERT(num_regs >= MIN_FINALIZERS, "Finalize registration failed " + "already after %d registrations.", num_regs); + + for (i = 0; i < num_regs; i++) { + rc = rte_dispatcher_finalize_unregister( + test_app->dispatcher, reg_ids[i] + ); + TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalizer %d", + reg_ids[i]); + } + + return TEST_SUCCESS; +} + +static struct unit_test_suite test_suite = { + .suite_name = "Event dispatcher test suite", + .unit_test_cases = { + TEST_CASE_ST(test_setup, test_teardown, test_basic), + TEST_CASE_ST(test_setup, test_teardown, test_drop), + TEST_CASE_ST(test_setup, test_teardown, + test_many_handler_registrations), + TEST_CASE_ST(test_setup, test_teardown, + test_many_finalize_registrations), + TEST_CASES_END() + } +}; + +static int +test_dispatcher(void) +{ + return unit_test_suite_runner(&test_suite); +} + +REGISTER_FAST_TEST(dispatcher_autotest, false, true, test_dispatcher); diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c index 3efa4af1043..6cb27f4de11 100644 --- a/app/test/test_distributor.c +++ b/app/test/test_distributor.c @@ -952,4 +952,4 @@ test_distributor(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(distributor_autotest, test_distributor); +REGISTER_FAST_TEST(distributor_autotest, false, true, test_distributor); diff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c index ee4321486d8..ca868451d7b 100644 --- a/app/test/test_distributor_perf.c +++ b/app/test/test_distributor_perf.c @@ -277,4 +277,4 @@ test_distributor_perf(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(distributor_perf_autotest, test_distributor_perf); +REGISTER_PERF_TEST(distributor_perf_autotest, test_distributor_perf); diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c index 0736ff2a186..216f84b6bb4 100644 --- a/app/test/test_dmadev.c +++ b/app/test/test_dmadev.c @@ -18,11 +18,37 @@ #define ERR_RETURN(...) 
do { print_err(__func__, __LINE__, __VA_ARGS__); return -1; } while (0) +#define TEST_RINGSIZE 512 #define COPY_LEN 1024 static struct rte_mempool *pool; static uint16_t id_count; +enum { + TEST_PARAM_REMOTE_ADDR = 0, + TEST_PARAM_MAX, +}; + +static const char * const dma_test_param[] = { + [TEST_PARAM_REMOTE_ADDR] = "remote_addr", +}; + +static uint64_t env_test_param[TEST_PARAM_MAX]; + +enum { + TEST_M2D_AUTO_FREE = 0, + TEST_MAX, +}; + +struct dma_add_test { + const char *name; + bool enabled; +}; + +struct dma_add_test dma_add_test[] = { + [TEST_M2D_AUTO_FREE] = {.name = "m2d_auto_free", .enabled = false}, +}; + static void __rte_format_printf(3, 4) print_err(const char *func, int lineno, const char *format, ...) @@ -797,10 +823,103 @@ test_burst_capacity(int16_t dev_id, uint16_t vchan) return 0; } +static int +test_m2d_auto_free(int16_t dev_id, uint16_t vchan) +{ +#define NR_MBUF 256 + struct rte_mbuf *src[NR_MBUF], *dst[NR_MBUF]; + const struct rte_dma_vchan_conf qconf = { + .direction = RTE_DMA_DIR_MEM_TO_DEV, + .nb_desc = TEST_RINGSIZE, + .auto_free.m2d.pool = pool, + .dst_port.port_type = RTE_DMA_PORT_PCIE, + .dst_port.pcie.coreid = 0, + }; + uint32_t buf_cnt1, buf_cnt2; + struct rte_mempool_ops *ops; + static bool dev_init; + uint16_t nb_done = 0; + bool dma_err = false; + int retry = 100; + int i, ret = 0; + + if (!dev_init) { + /* Stop the device to reconfigure vchan. */ + if (rte_dma_stop(dev_id) < 0) + ERR_RETURN("Error stopping device %u\n", dev_id); + + if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0) + ERR_RETURN("Error with queue configuration\n"); + + if (rte_dma_start(dev_id) != 0) + ERR_RETURN("Error with rte_dma_start()\n"); + + dev_init = true; + } + + if (rte_pktmbuf_alloc_bulk(pool, dst, NR_MBUF) != 0) + ERR_RETURN("alloc dst mbufs failed.\n"); + + for (i = 0; i < NR_MBUF; i++) { + /* Using mbuf structure to hold remote iova address. */ + rte_mbuf_iova_set(dst[i], (rte_iova_t)env_test_param[TEST_PARAM_REMOTE_ADDR]); + dst[i]->data_off = 0; + } + + /* Capture buffer count before allocating source buffer. */ + ops = rte_mempool_get_ops(pool->ops_index); + buf_cnt1 = ops->get_count(pool); + + if (rte_pktmbuf_alloc_bulk(pool, src, NR_MBUF) != 0) { + printf("alloc src mbufs failed.\n"); + ret = -1; + goto done; + } + + if ((buf_cnt1 - NR_MBUF) != ops->get_count(pool)) { + printf("Buffer count check failed.\n"); + ret = -1; + goto done; + } + + for (i = 0; i < NR_MBUF; i++) { + ret = rte_dma_copy(dev_id, vchan, rte_mbuf_data_iova(src[i]), + rte_mbuf_data_iova(dst[i]), COPY_LEN, + RTE_DMA_OP_FLAG_AUTO_FREE); + + if (ret < 0) { + printf("rte_dma_copy returned error.\n"); + goto done; + } + } + + rte_dma_submit(dev_id, vchan); + do { + nb_done += rte_dma_completed(dev_id, vchan, (NR_MBUF - nb_done), NULL, &dma_err); + if (dma_err) + break; + /* Sleep for 1 millisecond */ + rte_delay_us_sleep(1000); + } while (retry-- && (nb_done < NR_MBUF)); + + buf_cnt2 = ops->get_count(pool); + if ((buf_cnt1 != buf_cnt2) || dma_err) { + printf("Free mem to dev buffer test failed.\n"); + ret = -1; + } + +done: + rte_pktmbuf_free_bulk(dst, NR_MBUF); + /* If the test passes source buffer will be freed in hardware. 
*/ + if (ret < 0) + rte_pktmbuf_free_bulk(&src[nb_done], (NR_MBUF - nb_done)); + + return ret; +} + static int test_dmadev_instance(int16_t dev_id) { -#define TEST_RINGSIZE 512 #define CHECK_ERRS true struct rte_dma_stats stats; struct rte_dma_info info; @@ -890,6 +1009,13 @@ test_dmadev_instance(int16_t dev_id) else if (runtest("fill", test_enqueue_fill, 1, dev_id, vchan, CHECK_ERRS) < 0) goto err; + if ((info.dev_capa & RTE_DMA_CAPA_M2D_AUTO_FREE) && + dma_add_test[TEST_M2D_AUTO_FREE].enabled == true) { + if (runtest("m2d_auto_free", test_m2d_auto_free, 128, dev_id, vchan, + CHECK_ERRS) < 0) + goto err; + } + rte_mempool_free(pool); if (rte_dma_stop(dev_id) < 0) @@ -922,11 +1048,50 @@ test_apis(void) return ret; } +static void +parse_dma_env_var(void) +{ + char *dma_env_param_str = getenv("DPDK_ADD_DMA_TEST_PARAM"); + char *dma_env_test_str = getenv("DPDK_ADD_DMA_TEST"); + char *params[32] = {0}; + char *tests[32] = {0}; + char *var[2] = {0}; + int n_var = 0; + int i, j; + + /* Additional test from commandline. */ + if (dma_env_test_str && strlen(dma_env_test_str) > 0) { + n_var = rte_strsplit(dma_env_test_str, strlen(dma_env_test_str), tests, + RTE_DIM(tests), ','); + for (i = 0; i < n_var; i++) { + for (j = 0; j < TEST_MAX; j++) { + if (!strcmp(tests[i], dma_add_test[j].name)) + dma_add_test[j].enabled = true; + } + } + } + + /* Commandline variables for test */ + if (dma_env_param_str && strlen(dma_env_param_str) > 0) { + n_var = rte_strsplit(dma_env_param_str, strlen(dma_env_param_str), params, + RTE_DIM(params), ','); + for (i = 0; i < n_var; i++) { + rte_strsplit(params[i], strlen(params[i]), var, RTE_DIM(var), '='); + for (j = 0; j < TEST_PARAM_MAX; j++) { + if (!strcmp(var[0], dma_test_param[j])) + env_test_param[j] = strtoul(var[1], NULL, 16); + } + } + } +} + static int test_dma(void) { int i; + parse_dma_env_var(); + /* basic sanity on dmadev infrastructure */ if (test_apis() < 0) ERR_RETURN("Error performing API tests\n"); @@ -941,4 +1106,4 @@ test_dma(void) return 0; } -REGISTER_TEST_COMMAND(dmadev_autotest, test_dma); +REGISTER_DRIVER_TEST(dmadev_autotest, test_dma); diff --git a/app/test/test_eal_flags.c b/app/test/test_eal_flags.c index 148e9098e81..6cb4b067573 100644 --- a/app/test/test_eal_flags.c +++ b/app/test/test_eal_flags.c @@ -1644,15 +1644,15 @@ test_memory_flags(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(eal_flags_c_opt_autotest, test_missing_c_flag); -REGISTER_TEST_COMMAND(eal_flags_main_opt_autotest, test_main_lcore_flag); -REGISTER_TEST_COMMAND(eal_flags_n_opt_autotest, test_invalid_n_flag); -REGISTER_TEST_COMMAND(eal_flags_hpet_autotest, test_no_hpet_flag); -REGISTER_TEST_COMMAND(eal_flags_no_huge_autotest, test_no_huge_flag); -REGISTER_TEST_COMMAND(eal_flags_a_opt_autotest, test_allow_flag); -REGISTER_TEST_COMMAND(eal_flags_b_opt_autotest, test_invalid_b_flag); -REGISTER_TEST_COMMAND(eal_flags_vdev_opt_autotest, test_invalid_vdev_flag); -REGISTER_TEST_COMMAND(eal_flags_r_opt_autotest, test_invalid_r_flag); -REGISTER_TEST_COMMAND(eal_flags_mem_autotest, test_memory_flags); -REGISTER_TEST_COMMAND(eal_flags_file_prefix_autotest, test_file_prefix); -REGISTER_TEST_COMMAND(eal_flags_misc_autotest, test_misc_flags); +REGISTER_FAST_TEST(eal_flags_c_opt_autotest, false, false, test_missing_c_flag); +REGISTER_FAST_TEST(eal_flags_main_opt_autotest, false, false, test_main_lcore_flag); +REGISTER_FAST_TEST(eal_flags_n_opt_autotest, false, false, test_invalid_n_flag); +REGISTER_FAST_TEST(eal_flags_hpet_autotest, false, false, test_no_hpet_flag); 
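Note on the registration changes running through this series (not part of the patch): REGISTER_TEST_COMMAND() users are re-sorted into named suites via REGISTER_FAST_TEST(), REGISTER_PERF_TEST() and REGISTER_DRIVER_TEST(). A minimal sketch of how a new case would now register; the two boolean arguments are assumed here to flag no-hugepage and ASan compatibility, matching how the conversions in this series set them:

static int
test_example(void)
{
	/* ... exercise the API under test ... */
	return TEST_SUCCESS;
}

/* Fast suite; assumed to mean: runs without hugepages, runs under ASan. */
REGISTER_FAST_TEST(example_autotest, true, true, test_example);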
+REGISTER_FAST_TEST(eal_flags_no_huge_autotest, false, false, test_no_huge_flag); +REGISTER_FAST_TEST(eal_flags_a_opt_autotest, false, false, test_allow_flag); +REGISTER_FAST_TEST(eal_flags_b_opt_autotest, false, false, test_invalid_b_flag); +REGISTER_FAST_TEST(eal_flags_vdev_opt_autotest, false, false, test_invalid_vdev_flag); +REGISTER_FAST_TEST(eal_flags_r_opt_autotest, false, false, test_invalid_r_flag); +REGISTER_FAST_TEST(eal_flags_mem_autotest, false, false, test_memory_flags); +REGISTER_FAST_TEST(eal_flags_file_prefix_autotest, false, false, test_file_prefix); +REGISTER_FAST_TEST(eal_flags_misc_autotest, false, false, test_misc_flags); diff --git a/app/test/test_eal_fs.c b/app/test/test_eal_fs.c index b3686edcb45..8cd287fa9d8 100644 --- a/app/test/test_eal_fs.c +++ b/app/test/test_eal_fs.c @@ -185,4 +185,4 @@ test_eal_fs(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(eal_fs_autotest, test_eal_fs); +REGISTER_FAST_TEST(eal_fs_autotest, true, true, test_eal_fs); diff --git a/app/test/test_efd.c b/app/test/test_efd.c index fa29e8f97aa..1c0986b9bcd 100644 --- a/app/test/test_efd.c +++ b/app/test/test_efd.c @@ -473,4 +473,4 @@ test_efd(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(efd_autotest, test_efd); +REGISTER_PERF_TEST(efd_autotest, test_efd); diff --git a/app/test/test_efd_perf.c b/app/test/test_efd_perf.c index 4d04ed93e35..b212e96767a 100644 --- a/app/test/test_efd_perf.c +++ b/app/test/test_efd_perf.c @@ -393,4 +393,4 @@ test_efd_perf(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(efd_perf_autotest, test_efd_perf); +REGISTER_PERF_TEST(efd_perf_autotest, test_efd_perf); diff --git a/app/test/test_errno.c b/app/test/test_errno.c index 0db4fbc8b31..b429962fb92 100644 --- a/app/test/test_errno.c +++ b/app/test/test_errno.c @@ -94,4 +94,4 @@ test_errno(void) return 0; } -REGISTER_TEST_COMMAND(errno_autotest, test_errno); +REGISTER_FAST_TEST(errno_autotest, true, true, test_errno); diff --git a/app/test/test_ethdev_api.c b/app/test/test_ethdev_api.c new file mode 100644 index 00000000000..00d6a5c6145 --- /dev/null +++ b/app/test/test_ethdev_api.c @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2023, Advanced Micro Devices, Inc. 
+ */ + +#include +#include + +#include +#include "test.h" + +#define NUM_RXQ 2 +#define NUM_TXQ 2 +#define NUM_RXD 512 +#define NUM_TXD 512 +#define NUM_MBUF 1024 +#define MBUF_CACHE_SIZE 256 + +static int32_t +ethdev_api_queue_status(void) +{ + struct rte_eth_dev_info dev_info; + struct rte_eth_rxq_info rx_qinfo; + struct rte_eth_txq_info tx_qinfo; + struct rte_mempool *mbuf_pool; + struct rte_eth_conf eth_conf; + uint16_t port_id; + int ret; + + if (rte_eth_dev_count_avail() == 0) + return TEST_SKIPPED; + + mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUF, MBUF_CACHE_SIZE, 0, + RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id()); + + RTE_ETH_FOREACH_DEV(port_id) { + memset(&eth_conf, 0, sizeof(eth_conf)); + ret = rte_eth_dev_configure(port_id, NUM_RXQ, NUM_TXQ, &eth_conf); + TEST_ASSERT(ret == 0, + "Port(%u) failed to configure.\n", port_id); + + /* RxQ setup */ + for (uint16_t queue_id = 0; queue_id < NUM_RXQ; queue_id++) { + ret = rte_eth_rx_queue_setup(port_id, queue_id, NUM_RXD, + rte_socket_id(), NULL, mbuf_pool); + TEST_ASSERT(ret == 0, + "Port(%u), queue(%u) failed to setup RxQ.\n", + port_id, queue_id); + } + + /* TxQ setup */ + for (uint16_t queue_id = 0; queue_id < NUM_TXQ; queue_id++) { + ret = rte_eth_tx_queue_setup(port_id, queue_id, NUM_TXD, + rte_socket_id(), NULL); + TEST_ASSERT(ret == 0, + "Port(%u), queue(%u) failed to setup TxQ.\n", + port_id, queue_id); + } + + ret = rte_eth_dev_info_get(port_id, &dev_info); + TEST_ASSERT(ret == 0, + "Port(%u) failed to get dev info.\n", port_id); + + /* Initial RxQ */ + for (uint16_t queue_id = 0; queue_id < dev_info.nb_rx_queues; queue_id++) { + ret = rte_eth_rx_queue_info_get(port_id, queue_id, &rx_qinfo); + if (ret == -ENOTSUP) + continue; + + TEST_ASSERT(ret == 0, + "Port(%u), queue(%u) failed to get RxQ info.\n", + port_id, queue_id); + + TEST_ASSERT(rx_qinfo.queue_state == RTE_ETH_QUEUE_STATE_STOPPED, + "Wrong initial Rx queue(%u) state(%d)\n", + queue_id, rx_qinfo.queue_state); + } + + /* Initial TxQ */ + for (uint16_t queue_id = 0; queue_id < dev_info.nb_tx_queues; queue_id++) { + ret = rte_eth_tx_queue_info_get(port_id, queue_id, &tx_qinfo); + if (ret == -ENOTSUP) + continue; + + TEST_ASSERT(ret == 0, + "Port(%u), queue(%u) failed to get TxQ info.\n", + port_id, queue_id); + + TEST_ASSERT(tx_qinfo.queue_state == RTE_ETH_QUEUE_STATE_STOPPED, + "Wrong initial Tx queue(%u) state(%d)\n", + queue_id, tx_qinfo.queue_state); + } + + ret = rte_eth_dev_start(port_id); + TEST_ASSERT(ret == 0, + "Port(%u) failed to start.\n", port_id); + + /* Started RxQ */ + for (uint16_t queue_id = 0; queue_id < dev_info.nb_rx_queues; queue_id++) { + ret = rte_eth_rx_queue_info_get(port_id, queue_id, &rx_qinfo); + if (ret == -ENOTSUP) + continue; + + TEST_ASSERT(ret == 0, + "Port(%u), queue(%u) failed to get RxQ info.\n", + port_id, queue_id); + + TEST_ASSERT(rx_qinfo.queue_state == RTE_ETH_QUEUE_STATE_STARTED, + "Wrong started Rx queue(%u) state(%d)\n", + queue_id, rx_qinfo.queue_state); + } + + /* Started TxQ */ + for (uint16_t queue_id = 0; queue_id < dev_info.nb_tx_queues; queue_id++) { + ret = rte_eth_tx_queue_info_get(port_id, queue_id, &tx_qinfo); + if (ret == -ENOTSUP) + continue; + + TEST_ASSERT(ret == 0, + "Port(%u), queue(%u) failed to get TxQ info.\n", + port_id, queue_id); + + TEST_ASSERT(tx_qinfo.queue_state == RTE_ETH_QUEUE_STATE_STARTED, + "Wrong started Tx queue(%u) state(%d)\n", + queue_id, tx_qinfo.queue_state); + } + + ret = rte_eth_dev_stop(port_id); + TEST_ASSERT(ret == 0, + "Port(%u) failed to stop.\n", port_id); + + /* Stopped RxQ 
*/ + for (uint16_t queue_id = 0; queue_id < dev_info.nb_rx_queues; queue_id++) { + ret = rte_eth_rx_queue_info_get(port_id, queue_id, &rx_qinfo); + if (ret == -ENOTSUP) + continue; + + TEST_ASSERT(ret == 0, + "Port(%u), queue(%u) failed to get RxQ info.\n", + port_id, queue_id); + + TEST_ASSERT(rx_qinfo.queue_state == RTE_ETH_QUEUE_STATE_STOPPED, + "Wrong stopped Rx queue(%u) state(%d)\n", + queue_id, rx_qinfo.queue_state); + } + + /* Stopped TxQ */ + for (uint16_t queue_id = 0; queue_id < dev_info.nb_tx_queues; queue_id++) { + ret = rte_eth_tx_queue_info_get(port_id, queue_id, &tx_qinfo); + if (ret == -ENOTSUP) + continue; + + TEST_ASSERT(ret == 0, + "Port(%u), queue(%u) failed to get TxQ info.\n", + port_id, queue_id); + + TEST_ASSERT(tx_qinfo.queue_state == RTE_ETH_QUEUE_STATE_STOPPED, + "Wrong stopped Tx queue(%u) state(%d)\n", + queue_id, tx_qinfo.queue_state); + } + } + + return TEST_SUCCESS; +} + +static struct unit_test_suite ethdev_api_testsuite = { + .suite_name = "ethdev API tests", + .setup = NULL, + .teardown = NULL, + .unit_test_cases = { + TEST_CASE(ethdev_api_queue_status), + /* TODO: Add deferred_start queue status test */ + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +static int +test_ethdev_api(void) +{ + rte_log_set_global_level(RTE_LOG_DEBUG); + rte_log_set_level(RTE_LOGTYPE_EAL, RTE_LOG_DEBUG); + + return unit_test_suite_runner(&ethdev_api_testsuite); +} + +/* TODO: Make part of the fast test suite, `REGISTER_FAST_TEST()`, + * when all drivers comply with the queue state requirement + */ +REGISTER_TEST_COMMAND(ethdev_api, test_ethdev_api); diff --git a/app/test/test_ethdev_link.c b/app/test/test_ethdev_link.c index ab52385a120..f063a5fe269 100644 --- a/app/test/test_ethdev_link.c +++ b/app/test/test_ethdev_link.c @@ -167,4 +167,4 @@ test_link_status(void) return unit_test_suite_runner(&link_status_testsuite); } -REGISTER_TEST_COMMAND(ethdev_link_status, test_link_status); +REGISTER_FAST_TEST(ethdev_link_status, true, true, test_link_status); diff --git a/app/test/test_event_crypto_adapter.c b/app/test/test_event_crypto_adapter.c index 24f66be69f4..0c56744ba03 100644 --- a/app/test/test_event_crypto_adapter.c +++ b/app/test/test_event_crypto_adapter.c @@ -284,6 +284,9 @@ test_crypto_adapter_params(void) }; err = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap); + if (err == -ENOTSUP) + return TEST_SKIPPED; + TEST_ASSERT_SUCCESS(err, "Failed to get adapter capabilities\n"); if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) { @@ -1069,11 +1072,10 @@ configure_cryptodev(void) return TEST_FAILED; } - /* Create a NULL crypto device */ - nb_devs = rte_cryptodev_device_count_by_driver( - rte_cryptodev_driver_id_get( - RTE_STR(CRYPTODEV_NAME_NULL_PMD))); + + nb_devs = rte_cryptodev_count(); if (!nb_devs) { + /* Create a NULL crypto device */ ret = rte_vdev_init( RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL); @@ -1252,8 +1254,13 @@ test_crypto_adapter_create(void) .enqueue_depth = 8, .new_event_threshold = 1200, }; + uint32_t cap; int ret; + ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap); + if (ret == -ENOTSUP) + return ret; + /* Create adapter with default port creation callback */ ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID, evdev, @@ -1274,6 +1281,9 @@ test_crypto_adapter_qp_add_del(void) int ret; ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap); + if (ret == -ENOTSUP) + return TEST_SKIPPED; + TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); if (cap & 
RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) { @@ -1309,6 +1319,9 @@ configure_event_crypto_adapter(enum rte_event_crypto_adapter_mode mode) int ret; ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap); + if (ret == -ENOTSUP) + return ret; + TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); /* Skip mode and capability mismatch check for SW eventdev */ @@ -1475,6 +1488,9 @@ crypto_adapter_teardown(void) { int ret; + if (!crypto_adapter_setup_done) + return; + ret = rte_event_crypto_adapter_stop(TEST_ADAPTER_ID); if (ret < 0) RTE_LOG(ERR, USER1, "Failed to stop adapter!"); diff --git a/app/test/test_event_dma_adapter.c b/app/test/test_event_dma_adapter.c new file mode 100644 index 00000000000..35b417b69f7 --- /dev/null +++ b/app/test/test_event_dma_adapter.c @@ -0,0 +1,805 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2023 Marvell. + */ + +#include "test.h" +#include +#include +#include +#include +#include +#include + +#ifdef RTE_EXEC_ENV_WINDOWS +static int +test_event_dma_adapter(void) +{ + printf("event_dma_adapter not supported on Windows, skipping test\n"); + return TEST_SKIPPED; +} + +#else + +#include +#include +#include +#include +#include + +#define NUM_MBUFS (8191) +#define MBUF_CACHE_SIZE (256) +#define TEST_APP_PORT_ID 0 +#define TEST_APP_EV_QUEUE_ID 0 +#define TEST_APP_EV_PRIORITY 0 +#define TEST_APP_EV_FLOWID 0xAABB +#define TEST_DMA_EV_QUEUE_ID 1 +#define TEST_ADAPTER_ID 0 +#define TEST_DMA_DEV_ID 0 +#define TEST_DMA_VCHAN_ID 0 +#define PACKET_LENGTH 1024 +#define NB_TEST_PORTS 1 +#define NB_TEST_QUEUES 2 +#define NUM_CORES 2 +#define DMA_OP_POOL_SIZE 128 +#define TEST_MAX_OP 32 +#define TEST_RINGSIZE 512 + +#define MBUF_SIZE (RTE_PKTMBUF_HEADROOM + PACKET_LENGTH) + +/* Handle log statements in same manner as test macros */ +#define LOG_DBG(...) RTE_LOG(DEBUG, EAL, __VA_ARGS__) + +struct event_dma_adapter_test_params { + struct rte_mempool *src_mbuf_pool; + struct rte_mempool *dst_mbuf_pool; + struct rte_mempool *op_mpool; + uint8_t dma_event_port_id; + uint8_t internal_port_op_fwd; +}; + +struct rte_event dma_response_info = { + .queue_id = TEST_APP_EV_QUEUE_ID, + .sched_type = RTE_SCHED_TYPE_ATOMIC, + .flow_id = TEST_APP_EV_FLOWID, + .priority = TEST_APP_EV_PRIORITY +}; + +static struct event_dma_adapter_test_params params; +static uint8_t dma_adapter_setup_done; +static uint32_t slcore_id; +static int evdev; + +static int +send_recv_ev(struct rte_event *ev) +{ + struct rte_event recv_ev[TEST_MAX_OP]; + uint16_t nb_enqueued = 0; + int i = 0; + + if (params.internal_port_op_fwd) { + nb_enqueued = rte_event_dma_adapter_enqueue(evdev, TEST_APP_PORT_ID, ev, + TEST_MAX_OP); + } else { + while (nb_enqueued < TEST_MAX_OP) { + nb_enqueued += rte_event_enqueue_burst(evdev, TEST_APP_PORT_ID, + &ev[nb_enqueued], TEST_MAX_OP - + nb_enqueued); + } + } + + TEST_ASSERT_EQUAL(nb_enqueued, TEST_MAX_OP, "Failed to send event to dma adapter\n"); + + while (i < TEST_MAX_OP) { + if (rte_event_dequeue_burst(evdev, TEST_APP_PORT_ID, &recv_ev[i], 1, 0) != 1) + continue; + i++; + } + + TEST_ASSERT_EQUAL(i, TEST_MAX_OP, "Test failed. 
Failed to dequeue events.\n"); + + return TEST_SUCCESS; +} + +static int +test_dma_adapter_stats(void) +{ + struct rte_event_dma_adapter_stats stats; + + rte_event_dma_adapter_stats_get(TEST_ADAPTER_ID, &stats); + printf(" +------------------------------------------------------+\n"); + printf(" + DMA adapter stats for instance %u:\n", TEST_ADAPTER_ID); + printf(" + Event port poll count 0x%" PRIx64 "\n", + stats.event_poll_count); + printf(" + Event dequeue count 0x%" PRIx64 "\n", + stats.event_deq_count); + printf(" + DMA dev enqueue count 0x%" PRIx64 "\n", + stats.dma_enq_count); + printf(" + DMA dev enqueue failed count 0x%" PRIx64 "\n", + stats.dma_enq_fail_count); + printf(" + DMA dev dequeue count 0x%" PRIx64 "\n", + stats.dma_deq_count); + printf(" + Event enqueue count 0x%" PRIx64 "\n", + stats.event_enq_count); + printf(" + Event enqueue retry count 0x%" PRIx64 "\n", + stats.event_enq_retry_count); + printf(" + Event enqueue fail count 0x%" PRIx64 "\n", + stats.event_enq_fail_count); + printf(" +------------------------------------------------------+\n"); + + rte_event_dma_adapter_stats_reset(TEST_ADAPTER_ID); + return TEST_SUCCESS; +} + +static int +test_dma_adapter_params(void) +{ + struct rte_event_dma_adapter_runtime_params out_params; + struct rte_event_dma_adapter_runtime_params in_params; + struct rte_event event; + uint32_t cap; + int err, rc; + + err = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap); + TEST_ASSERT_SUCCESS(err, "Failed to get adapter capabilities\n"); + + if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) { + err = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID, + TEST_DMA_VCHAN_ID, &event); + } else + err = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID, + TEST_DMA_VCHAN_ID, NULL); + + TEST_ASSERT_SUCCESS(err, "Failed to add vchan\n"); + + err = rte_event_dma_adapter_runtime_params_init(&in_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + err = rte_event_dma_adapter_runtime_params_init(&out_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + /* Case 1: Get the default value of mbufs processed by adapter */ + err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params); + if (err == -ENOTSUP) { + rc = TEST_SKIPPED; + goto vchan_del; + } + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + /* Case 2: Set max_nb = 32 (=BATCH_SEIZE) */ + in_params.max_nb = 32; + + err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u", + in_params.max_nb, out_params.max_nb); + + /* Case 3: Set max_nb = 192 */ + in_params.max_nb = 192; + + err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u", + in_params.max_nb, out_params.max_nb); + + /* Case 4: Set max_nb = 256 */ + in_params.max_nb = 256; + + err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params); + TEST_ASSERT(err == 
0, "Expected 0 got %d", err); + TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u", + in_params.max_nb, out_params.max_nb); + + /* Case 5: Set max_nb = 30(src_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0); + op->dst_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0); + + /* Update Op */ + op->src_seg->addr = rte_pktmbuf_iova(src_mbuf[i]); + op->dst_seg->addr = rte_pktmbuf_iova(dst_mbuf[i]); + op->src_seg->length = PACKET_LENGTH; + op->dst_seg->length = PACKET_LENGTH; + op->nb_src = 1; + op->nb_dst = 1; + op->flags = RTE_DMA_OP_FLAG_SUBMIT; + op->op_mp = params.op_mpool; + op->dma_dev_id = TEST_DMA_DEV_ID; + op->vchan = TEST_DMA_VCHAN_ID; + + response_info.event = dma_response_info.event; + rte_memcpy((uint8_t *)op + sizeof(struct rte_event_dma_adapter_op), &response_info, + sizeof(struct rte_event)); + + /* Fill in event info and update event_ptr with rte_event_dma_adapter_op */ + memset(&ev[i], 0, sizeof(struct rte_event)); + ev[i].event = 0; + ev[i].event_type = RTE_EVENT_TYPE_DMADEV; + ev[i].queue_id = TEST_DMA_EV_QUEUE_ID; + ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC; + ev[i].flow_id = 0xAABB; + ev[i].event_ptr = op; + } + + ret = send_recv_ev(ev); + TEST_ASSERT_SUCCESS(ret, "Failed to send/receive event to dma adapter\n"); + + test_dma_adapter_stats(); + + for (i = 0; i < TEST_MAX_OP; i++) { + op = ev[i].event_ptr; + ret = memcmp(rte_pktmbuf_mtod(src_mbuf[i], void *), + rte_pktmbuf_mtod(dst_mbuf[i], void *), PACKET_LENGTH); + + TEST_ASSERT_EQUAL(ret, 0, "Data mismatch for dma adapter\n"); + + rte_free(op->src_seg); + rte_free(op->dst_seg); + rte_mempool_put(op->op_mp, op); + } + + rte_pktmbuf_free_bulk(src_mbuf, TEST_MAX_OP); + rte_pktmbuf_free_bulk(dst_mbuf, TEST_MAX_OP); + + return TEST_SUCCESS; +} + +static int +map_adapter_service_core(void) +{ + uint32_t adapter_service_id; + int ret; + + if (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID, &adapter_service_id) == 0) { + uint32_t core_list[NUM_CORES]; + + ret = rte_service_lcore_list(core_list, NUM_CORES); + TEST_ASSERT(ret >= 0, "Failed to get service core list!"); + + if (core_list[0] != slcore_id) { + TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id), + "Failed to add service core"); + TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id), + "Failed to start service core"); + } + + TEST_ASSERT_SUCCESS(rte_service_map_lcore_set( + adapter_service_id, slcore_id, 1), + "Failed to map adapter service"); + } + + return TEST_SUCCESS; +} + +static int +test_with_op_forward_mode(void) +{ + uint32_t cap; + int ret; + + ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap); + TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); + + if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) && + !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) + map_adapter_service_core(); + else { + if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)) + return TEST_SKIPPED; + } + + TEST_ASSERT_SUCCESS(rte_event_dma_adapter_start(TEST_ADAPTER_ID), + "Failed to start event dma adapter"); + + ret = test_op_forward_mode(); + TEST_ASSERT_SUCCESS(ret, "DMA - FORWARD mode test failed\n"); + return TEST_SUCCESS; +} + +static int +configure_dmadev(void) +{ + const struct rte_dma_conf conf = { .nb_vchans = 1}; + const struct rte_dma_vchan_conf qconf = { + .direction = RTE_DMA_DIR_MEM_TO_MEM, + .nb_desc = TEST_RINGSIZE, + }; + struct rte_dma_info info; + unsigned int elt_size; + int ret; + + ret = rte_dma_count_avail(); + RTE_TEST_ASSERT_FAIL(ret, "No dma devices found!\n"); + + ret 
= rte_dma_info_get(TEST_DMA_DEV_ID, &info); + TEST_ASSERT_SUCCESS(ret, "Error with rte_dma_info_get()\n"); + + if (info.max_vchans < 1) + RTE_LOG(ERR, USER1, "Error, no channels available on device id %u\n", + TEST_DMA_DEV_ID); + + if (rte_dma_configure(TEST_DMA_DEV_ID, &conf) != 0) + RTE_LOG(ERR, USER1, "Error with rte_dma_configure()\n"); + + if (rte_dma_vchan_setup(TEST_DMA_DEV_ID, TEST_DMA_VCHAN_ID, &qconf) < 0) + RTE_LOG(ERR, USER1, "Error with vchan configuration\n"); + + ret = rte_dma_info_get(TEST_DMA_DEV_ID, &info); + if (ret != 0 || info.nb_vchans != 1) + RTE_LOG(ERR, USER1, "Error, no configured vchan reported on device id %u\n", + TEST_DMA_DEV_ID); + + params.src_mbuf_pool = rte_pktmbuf_pool_create("DMA_ADAPTER_SRC_MBUFPOOL", NUM_MBUFS, + MBUF_CACHE_SIZE, 0, MBUF_SIZE, + rte_socket_id()); + RTE_TEST_ASSERT_NOT_NULL(params.src_mbuf_pool, "Can't create DMA_SRC_MBUFPOOL\n"); + + params.dst_mbuf_pool = rte_pktmbuf_pool_create("DMA_ADAPTER_DST_MBUFPOOL", NUM_MBUFS, + MBUF_CACHE_SIZE, 0, MBUF_SIZE, + rte_socket_id()); + RTE_TEST_ASSERT_NOT_NULL(params.dst_mbuf_pool, "Can't create DMA_DST_MBUFPOOL\n"); + + elt_size = sizeof(struct rte_event_dma_adapter_op) + sizeof(struct rte_event); + params.op_mpool = rte_mempool_create("EVENT_DMA_OP_POOL", DMA_OP_POOL_SIZE, elt_size, 0, + 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0); + RTE_TEST_ASSERT_NOT_NULL(params.op_mpool, "Can't create DMA_OP_POOL\n"); + + return TEST_SUCCESS; +} + +static inline void +evdev_set_conf_values(struct rte_event_dev_config *dev_conf, struct rte_event_dev_info *info) +{ + memset(dev_conf, 0, sizeof(struct rte_event_dev_config)); + dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns; + dev_conf->nb_event_ports = NB_TEST_PORTS; + dev_conf->nb_event_queues = NB_TEST_QUEUES; + dev_conf->nb_event_queue_flows = info->max_event_queue_flows; + dev_conf->nb_event_port_dequeue_depth = + info->max_event_port_dequeue_depth; + dev_conf->nb_event_port_enqueue_depth = + info->max_event_port_enqueue_depth; + dev_conf->nb_events_limit = + info->max_num_events; +} + +static int +configure_eventdev(void) +{ + struct rte_event_queue_conf queue_conf; + struct rte_event_dev_config devconf; + struct rte_event_dev_info info; + uint32_t queue_count; + uint32_t port_count; + uint8_t qid; + int ret; + + if (!rte_event_dev_count()) { + /* If there is no hardware eventdev, or no software vdev was + * specified on the command line, create an instance of + * event_sw. + */ + LOG_DBG("Failed to find a valid event device... 
" + "testing with event_sw device\n"); + TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL), + "Error creating eventdev"); + evdev = rte_event_dev_get_dev_id("event_sw0"); + } + + ret = rte_event_dev_info_get(evdev, &info); + TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info\n"); + + evdev_set_conf_values(&devconf, &info); + + ret = rte_event_dev_configure(evdev, &devconf); + TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev\n"); + + /* Set up event queue */ + ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count); + TEST_ASSERT_SUCCESS(ret, "Queue count get failed\n"); + TEST_ASSERT_EQUAL(queue_count, 2, "Unexpected queue count\n"); + + qid = TEST_APP_EV_QUEUE_ID; + ret = rte_event_queue_setup(evdev, qid, NULL); + TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d\n", qid); + + queue_conf.nb_atomic_flows = info.max_event_queue_flows; + queue_conf.nb_atomic_order_sequences = 32; + queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC; + queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST; + queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK; + + qid = TEST_DMA_EV_QUEUE_ID; + ret = rte_event_queue_setup(evdev, qid, &queue_conf); + TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%u\n", qid); + + /* Set up event port */ + ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT, + &port_count); + TEST_ASSERT_SUCCESS(ret, "Port count get failed\n"); + TEST_ASSERT_EQUAL(port_count, 1, "Unexpected port count\n"); + + ret = rte_event_port_setup(evdev, TEST_APP_PORT_ID, NULL); + TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d\n", + TEST_APP_PORT_ID); + + qid = TEST_APP_EV_QUEUE_ID; + ret = rte_event_port_link(evdev, TEST_APP_PORT_ID, &qid, NULL, 1); + TEST_ASSERT(ret >= 0, "Failed to link queue port=%d\n", + TEST_APP_PORT_ID); + + return TEST_SUCCESS; +} + +static void +test_dma_adapter_free(void) +{ + rte_event_dma_adapter_free(TEST_ADAPTER_ID); +} + +static int +test_dma_adapter_create(void) +{ + struct rte_event_dev_info evdev_info = {0}; + struct rte_event_port_conf conf = {0}; + int ret; + + ret = rte_event_dev_info_get(evdev, &evdev_info); + TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n"); + + conf.new_event_threshold = evdev_info.max_num_events; + conf.dequeue_depth = evdev_info.max_event_port_dequeue_depth; + conf.enqueue_depth = evdev_info.max_event_port_enqueue_depth; + + /* Create adapter with default port creation callback */ + ret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, 0); + TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n"); + + return TEST_SUCCESS; +} + +static int +test_dma_adapter_vchan_add_del(void) +{ + struct rte_event event; + uint32_t cap; + int ret; + + ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap); + TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); + + if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) { + ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID, + TEST_DMA_VCHAN_ID, &event); + } else + ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID, + TEST_DMA_VCHAN_ID, NULL); + + TEST_ASSERT_SUCCESS(ret, "Failed to create add vchan\n"); + + ret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID, + TEST_DMA_VCHAN_ID); + TEST_ASSERT_SUCCESS(ret, "Failed to delete vchan\n"); + + return TEST_SUCCESS; +} + +static int +configure_event_dma_adapter(enum rte_event_dma_adapter_mode mode) +{ + struct rte_event_dev_info evdev_info = {0}; + struct rte_event_port_conf conf 
= {0}; + struct rte_event event; + uint32_t cap; + int ret; + + ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap); + TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); + + /* Skip mode and capability mismatch check for SW eventdev */ + if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) && + !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) && + !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND)) + goto adapter_create; + + if (mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) { + if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) + params.internal_port_op_fwd = 1; + else + return -ENOTSUP; + } + +adapter_create: + ret = rte_event_dev_info_get(evdev, &evdev_info); + TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n"); + + conf.new_event_threshold = evdev_info.max_num_events; + conf.dequeue_depth = evdev_info.max_event_port_dequeue_depth; + conf.enqueue_depth = evdev_info.max_event_port_enqueue_depth; + + /* Create adapter with default port creation callback */ + ret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, mode); + TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n"); + + if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) { + ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID, + TEST_DMA_VCHAN_ID, &event); + } else + ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID, + TEST_DMA_VCHAN_ID, NULL); + + TEST_ASSERT_SUCCESS(ret, "Failed to add vchan\n"); + + if (!params.internal_port_op_fwd) { + ret = rte_event_dma_adapter_event_port_get(TEST_ADAPTER_ID, + ¶ms.dma_event_port_id); + TEST_ASSERT_SUCCESS(ret, "Failed to get event port\n"); + } + + return TEST_SUCCESS; +} + +static void +test_dma_adapter_stop(void) +{ + uint32_t evdev_service_id, adapter_service_id; + + /* retrieve service ids & stop services */ + if (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID, + &adapter_service_id) == 0) { + rte_service_runstate_set(adapter_service_id, 0); + rte_service_lcore_stop(slcore_id); + rte_service_lcore_del(slcore_id); + rte_event_dma_adapter_stop(TEST_ADAPTER_ID); + } + + if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) { + rte_service_runstate_set(evdev_service_id, 0); + rte_service_lcore_stop(slcore_id); + rte_service_lcore_del(slcore_id); + rte_dma_stop(TEST_DMA_DEV_ID); + rte_event_dev_stop(evdev); + } else { + rte_dma_stop(TEST_DMA_DEV_ID); + rte_event_dev_stop(evdev); + } +} + +static int +test_dma_adapter_conf(enum rte_event_dma_adapter_mode mode) +{ + uint32_t evdev_service_id; + uint8_t qid; + int ret; + + if (!dma_adapter_setup_done) { + ret = configure_event_dma_adapter(mode); + if (ret) + return ret; + if (!params.internal_port_op_fwd) { + qid = TEST_DMA_EV_QUEUE_ID; + ret = rte_event_port_link(evdev, + params.dma_event_port_id, &qid, NULL, 1); + TEST_ASSERT(ret >= 0, "Failed to link queue %d " + "port=%u\n", qid, + params.dma_event_port_id); + } + dma_adapter_setup_done = 1; + } + + /* retrieve service ids */ + if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) { + /* add a service core and start it */ + TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id), + "Failed to add service core"); + TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id), + "Failed to start service core"); + + /* map services to it */ + TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(evdev_service_id, + slcore_id, 1), "Failed to map evdev service"); + + /* set services to running */ + 
TEST_ASSERT_SUCCESS(rte_service_runstate_set(evdev_service_id, + 1), "Failed to start evdev service"); + } + + /* start the eventdev */ + TEST_ASSERT_SUCCESS(rte_event_dev_start(evdev), + "Failed to start event device"); + + /* start the dma dev */ + TEST_ASSERT_SUCCESS(rte_dma_start(TEST_DMA_DEV_ID), + "Failed to start dma device"); + + return TEST_SUCCESS; +} + +static int +test_dma_adapter_conf_op_forward_mode(void) +{ + enum rte_event_dma_adapter_mode mode; + + mode = RTE_EVENT_DMA_ADAPTER_OP_FORWARD; + + return test_dma_adapter_conf(mode); +} + +static int +testsuite_setup(void) +{ + int ret; + + slcore_id = rte_get_next_lcore(-1, 1, 0); + TEST_ASSERT_NOT_EQUAL(slcore_id, RTE_MAX_LCORE, "At least 2 lcores " + "are required to run this autotest\n"); + + /* Setup and start event device. */ + ret = configure_eventdev(); + TEST_ASSERT_SUCCESS(ret, "Failed to setup eventdev\n"); + + /* Setup and start dma device. */ + ret = configure_dmadev(); + TEST_ASSERT_SUCCESS(ret, "dmadev initialization failed\n"); + + return TEST_SUCCESS; +} + +static void +dma_adapter_teardown(void) +{ + int ret; + + ret = rte_event_dma_adapter_stop(TEST_ADAPTER_ID); + if (ret < 0) + RTE_LOG(ERR, USER1, "Failed to stop adapter!"); + + ret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID, + TEST_DMA_VCHAN_ID); + if (ret < 0) + RTE_LOG(ERR, USER1, "Failed to delete vchan!"); + + ret = rte_event_dma_adapter_free(TEST_ADAPTER_ID); + if (ret < 0) + RTE_LOG(ERR, USER1, "Failed to free adapter!"); + + dma_adapter_setup_done = 0; +} + +static void +dma_teardown(void) +{ + /* Free mbuf mempool */ + if (params.src_mbuf_pool != NULL) { + RTE_LOG(DEBUG, USER1, "DMA_ADAPTER_SRC_MBUFPOOL count %u\n", + rte_mempool_avail_count(params.src_mbuf_pool)); + rte_mempool_free(params.src_mbuf_pool); + params.src_mbuf_pool = NULL; + } + + if (params.dst_mbuf_pool != NULL) { + RTE_LOG(DEBUG, USER1, "DMA_ADAPTER_DST_MBUFPOOL count %u\n", + rte_mempool_avail_count(params.dst_mbuf_pool)); + rte_mempool_free(params.dst_mbuf_pool); + params.dst_mbuf_pool = NULL; + } + + /* Free ops mempool */ + if (params.op_mpool != NULL) { + RTE_LOG(DEBUG, USER1, "EVENT_DMA_OP_POOL count %u\n", + rte_mempool_avail_count(params.op_mpool)); + rte_mempool_free(params.op_mpool); + params.op_mpool = NULL; + } +} + +static void +eventdev_teardown(void) +{ + rte_event_dev_stop(evdev); +} + +static void +testsuite_teardown(void) +{ + dma_adapter_teardown(); + dma_teardown(); + eventdev_teardown(); +} + +static struct unit_test_suite functional_testsuite = { + .suite_name = "Event dma adapter test suite", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + + TEST_CASE_ST(NULL, test_dma_adapter_free, test_dma_adapter_create), + + TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free, + test_dma_adapter_vchan_add_del), + + TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free, + test_dma_adapter_stats), + + TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free, + test_dma_adapter_params), + + TEST_CASE_ST(test_dma_adapter_conf_op_forward_mode, test_dma_adapter_stop, + test_with_op_forward_mode), + + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +static int +test_event_dma_adapter(void) +{ + return unit_test_suite_runner(&functional_testsuite); +} + +#endif /* !RTE_EXEC_ENV_WINDOWS */ + +REGISTER_DRIVER_TEST(event_dma_adapter_autotest, test_event_dma_adapter); diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 52d146f97c5..0233c877796 
100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -407,6 +407,12 @@ adapter_create(void) return err; } +static void +adapter_free(void) +{ + rte_event_eth_rx_adapter_free(TEST_INST_ID); +} + static int adapter_create_with_params(void) { @@ -427,6 +433,13 @@ adapter_create_with_params(void) rxa_params.use_queue_event_buf = false; rxa_params.event_buf_size = 0; + /* Pass rxa_params = NULL */ + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, NULL); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + if (err == 0) + adapter_free(); + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, TEST_DEV_ID, &rx_p_conf, &rxa_params); TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); @@ -444,6 +457,98 @@ adapter_create_with_params(void) return TEST_SUCCESS; } +static int +test_port_conf_cb(uint8_t id, uint8_t event_dev_id, + struct rte_event_eth_rx_adapter_conf *conf, + void *conf_arg) +{ + struct rte_event_port_conf *port_conf, def_port_conf = {0}; + uint32_t started; + static int port_allocated; + static uint8_t port_id; + int ret; + + if (port_allocated) { + conf->event_port_id = port_id; + conf->max_nb_rx = 128; + return 0; + } + + RTE_SET_USED(id); + + ret = rte_event_dev_attr_get(event_dev_id, RTE_EVENT_DEV_ATTR_STARTED, + &started); + if (ret < 0) + return ret; + + if (started) + rte_event_dev_stop(event_dev_id); + + port_id = 1; + + if (conf_arg != NULL) + port_conf = conf_arg; + else { + port_conf = &def_port_conf; + ret = rte_event_port_default_conf_get(event_dev_id, port_id, + port_conf); + if (ret < 0) + return ret; + } + + ret = rte_event_port_setup(event_dev_id, port_id, port_conf); + if (ret < 0) + return ret; + + conf->event_port_id = port_id; + conf->max_nb_rx = 128; + + if (started) + rte_event_dev_start(event_dev_id); + + /* Reuse this port number next time this is called */ + port_allocated = 1; + + return 0; +} + +static int +adapter_create_ext_with_params(void) +{ + int err; + struct rte_event_dev_info dev_info; + struct rte_event_eth_rx_adapter_params rxa_params; + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + rxa_params.use_queue_event_buf = false; + rxa_params.event_buf_size = 0; + + /* Pass rxa_params = NULL */ + err = rte_event_eth_rx_adapter_create_ext_with_params(TEST_INST_ID, + TEST_DEV_ID, test_port_conf_cb, NULL, NULL); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + if (err == 0) + adapter_free(); + + err = rte_event_eth_rx_adapter_create_ext_with_params(TEST_INST_ID, + TEST_DEV_ID, test_port_conf_cb, NULL, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + rxa_params.event_buf_size = 128; + + err = rte_event_eth_rx_adapter_create_ext_with_params(TEST_INST_ID, + TEST_DEV_ID, test_port_conf_cb, NULL, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_ext_with_params(TEST_INST_ID, + TEST_DEV_ID, test_port_conf_cb, NULL, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err); + + return TEST_SUCCESS; +} + static int adapter_queue_event_buf_test(void) { @@ -549,12 +654,6 @@ adapter_queue_stats_test(void) return TEST_SUCCESS; } -static void -adapter_free(void) -{ - rte_event_eth_rx_adapter_free(TEST_INST_ID); -} - static int adapter_create_free(void) { @@ -1337,6 +1436,8 @@ static struct unit_test_suite event_eth_rx_tests = { adapter_pollq_instance_get), TEST_CASE_ST(adapter_create, 
adapter_free, adapter_get_set_params), + TEST_CASE_ST(adapter_create_ext_with_params, adapter_free, + adapter_start_stop), TEST_CASES_END() /**< NULL terminate unit test array */ } }; diff --git a/app/test/test_event_eth_tx_adapter.c b/app/test/test_event_eth_tx_adapter.c index 616c972ac0a..dbd22f68007 100644 --- a/app/test/test_event_eth_tx_adapter.c +++ b/app/test/test_event_eth_tx_adapter.c @@ -1006,5 +1006,4 @@ test_event_eth_tx_adapter_common(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(event_eth_tx_adapter_autotest, - test_event_eth_tx_adapter_common); +REGISTER_FAST_TEST(event_eth_tx_adapter_autotest, false, true, test_event_eth_tx_adapter_common); diff --git a/app/test/test_event_ring.c b/app/test/test_event_ring.c index bc4a6e73f5b..3bfb5109b77 100644 --- a/app/test/test_event_ring.c +++ b/app/test/test_event_ring.c @@ -256,4 +256,4 @@ test_event_ring(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(event_ring_autotest, test_event_ring); +REGISTER_FAST_TEST(event_ring_autotest, true, true, test_event_ring); diff --git a/app/test/test_event_timer_adapter.c b/app/test/test_event_timer_adapter.c index 510bebcf86b..2bc2e026a9a 100644 --- a/app/test/test_event_timer_adapter.c +++ b/app/test/test_event_timer_adapter.c @@ -1944,9 +1944,9 @@ test_timer_ticks_remaining(void) TEST_ASSERT_EQUAL(timeout_event_dequeue(&ev, 1, WAIT_TICKS(1)), 1, "Armed timer failed to trigger."); - TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_NOT_ARMED, - "Improper timer state set expected %d returned %d", - RTE_EVENT_TIMER_NOT_ARMED, ev_tim->state); + + if (ev_tim->state != RTE_EVENT_TIMER_NOT_ARMED) + ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED; /* Test that timer that fired returns error */ TEST_ASSERT_FAIL(rte_event_timer_remaining_ticks_get(timdev, ev_tim, diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c index 336529038e1..993e49af3b4 100644 --- a/app/test/test_eventdev.c +++ b/app/test/test_eventdev.c @@ -1129,6 +1129,121 @@ test_eventdev_link_get(void) return TEST_SUCCESS; } +static int +test_eventdev_profile_switch(void) +{ +#define MAX_RETRIES 4 + uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV]; + uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV]; + struct rte_event_queue_conf qcfg; + struct rte_event_port_conf pcfg; + struct rte_event_dev_info info; + struct rte_event ev; + uint8_t q, re; + int rc; + + rte_event_dev_info_get(TEST_DEV_ID, &info); + + if (info.max_profiles_per_port <= 1) + return TEST_SKIPPED; + + if (info.max_event_queues <= 1) + return TEST_SKIPPED; + + rc = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pcfg); + TEST_ASSERT_SUCCESS(rc, "Failed to get port0 default config"); + rc = rte_event_port_setup(TEST_DEV_ID, 0, &pcfg); + TEST_ASSERT_SUCCESS(rc, "Failed to setup port0"); + + rc = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qcfg); + TEST_ASSERT_SUCCESS(rc, "Failed to get queue0 default config"); + rc = rte_event_queue_setup(TEST_DEV_ID, 0, &qcfg); + TEST_ASSERT_SUCCESS(rc, "Failed to setup queue0"); + + q = 0; + rc = rte_event_port_profile_links_set(TEST_DEV_ID, 0, &q, NULL, 1, 0); + TEST_ASSERT(rc == 1, "Failed to link queue 0 to port 0 with profile 0"); + q = 1; + rc = rte_event_port_profile_links_set(TEST_DEV_ID, 0, &q, NULL, 1, 1); + TEST_ASSERT(rc == 1, "Failed to link queue 1 to port 0 with profile 1"); + + rc = rte_event_port_profile_links_get(TEST_DEV_ID, 0, queues, priorities, 0); + TEST_ASSERT(rc == 1, "Failed to links"); + TEST_ASSERT(queues[0] == 0, "Invalid queue found in link"); + + rc = 
rte_event_port_profile_links_get(TEST_DEV_ID, 0, queues, priorities, 1); + TEST_ASSERT(rc == 1, "Failed to get links"); + TEST_ASSERT(queues[0] == 1, "Invalid queue found in link"); + + rc = rte_event_dev_start(TEST_DEV_ID); + TEST_ASSERT_SUCCESS(rc, "Failed to start event device"); + + ev.event_type = RTE_EVENT_TYPE_CPU; + ev.queue_id = 0; + ev.op = RTE_EVENT_OP_NEW; + ev.flow_id = 0; + ev.u64 = 0xBADF00D0; + rc = rte_event_enqueue_burst(TEST_DEV_ID, 0, &ev, 1); + TEST_ASSERT(rc == 1, "Failed to enqueue event"); + ev.queue_id = 1; + ev.flow_id = 1; + rc = rte_event_enqueue_burst(TEST_DEV_ID, 0, &ev, 1); + TEST_ASSERT(rc == 1, "Failed to enqueue event"); + + ev.event = 0; + ev.u64 = 0; + + rc = rte_event_port_profile_switch(TEST_DEV_ID, 0, 1); + TEST_ASSERT_SUCCESS(rc, "Failed to change profile"); + + re = MAX_RETRIES; + while (re--) { + rc = rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0); + printf("rc %d\n", rc); + if (rc) + break; + } + + TEST_ASSERT(rc == 1, "Failed to dequeue event from profile 1"); + TEST_ASSERT(ev.flow_id == 1, "Incorrect flow identifier from profile 1"); + TEST_ASSERT(ev.queue_id == 1, "Incorrect queue identifier from profile 1"); + + re = MAX_RETRIES; + while (re--) { + rc = rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0); + TEST_ASSERT(rc == 0, "Unexpected event dequeued from active profile"); + } + + rc = rte_event_port_profile_switch(TEST_DEV_ID, 0, 0); + TEST_ASSERT_SUCCESS(rc, "Failed to change profile"); + + re = MAX_RETRIES; + while (re--) { + rc = rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0); + if (rc) + break; + } + + TEST_ASSERT(rc == 1, "Failed to dequeue event from profile 0"); + TEST_ASSERT(ev.flow_id == 0, "Incorrect flow identifier from profile 0"); + TEST_ASSERT(ev.queue_id == 0, "Incorrect queue identifier from profile 0"); + + re = MAX_RETRIES; + while (re--) { + rc = rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0); + TEST_ASSERT(rc == 0, "Unexpected event dequeued from active profile"); + } + + q = 0; + rc = rte_event_port_profile_unlink(TEST_DEV_ID, 0, &q, 1, 0); + TEST_ASSERT(rc == 1, "Failed to unlink queue 0 from port 0 with profile 0"); + q = 1; + rc = rte_event_port_profile_unlink(TEST_DEV_ID, 0, &q, 1, 1); + TEST_ASSERT(rc == 1, "Failed to unlink queue 1 from port 0 with profile 1"); + + return TEST_SUCCESS; +} + static int test_eventdev_close(void) { @@ -1187,6 +1302,8 @@ static struct unit_test_suite eventdev_common_testsuite = { test_eventdev_timeout_ticks), TEST_CASE_ST(NULL, NULL, test_eventdev_start_stop), + TEST_CASE_ST(eventdev_configure_setup, eventdev_stop_device, + test_eventdev_profile_switch), TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device, test_eventdev_link), TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device, @@ -1256,15 +1373,14 @@ test_eventdev_selftest_cn10k(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common); +REGISTER_FAST_TEST(eventdev_common_autotest, true, true, test_eventdev_common); #ifndef RTE_EXEC_ENV_WINDOWS -REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw); -REGISTER_TEST_COMMAND(eventdev_selftest_octeontx, - test_eventdev_selftest_octeontx); -REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2); -REGISTER_TEST_COMMAND(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2); -REGISTER_TEST_COMMAND(eventdev_selftest_cn9k, test_eventdev_selftest_cn9k); -REGISTER_TEST_COMMAND(eventdev_selftest_cn10k, test_eventdev_selftest_cn10k); +REGISTER_FAST_TEST(eventdev_selftest_sw, true, true, 
test_eventdev_selftest_sw); +REGISTER_DRIVER_TEST(eventdev_selftest_octeontx, test_eventdev_selftest_octeontx); +REGISTER_DRIVER_TEST(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2); +REGISTER_DRIVER_TEST(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2); +REGISTER_DRIVER_TEST(eventdev_selftest_cn9k, test_eventdev_selftest_cn9k); +REGISTER_DRIVER_TEST(eventdev_selftest_cn10k, test_eventdev_selftest_cn10k); #endif /* !RTE_EXEC_ENV_WINDOWS */ diff --git a/app/test/test_fbarray.c b/app/test/test_fbarray.c index a691bf44584..26a51e2a3e3 100644 --- a/app/test/test_fbarray.c +++ b/app/test/test_fbarray.c @@ -733,4 +733,4 @@ test_fbarray(void) return unit_test_suite_runner(&fbarray_test_suite); } -REGISTER_TEST_COMMAND(fbarray_autotest, test_fbarray); +REGISTER_FAST_TEST(fbarray_autotest, true, true, test_fbarray); diff --git a/app/test/test_fib.c b/app/test/test_fib.c index eb69d6e2fd4..45dccca1f61 100644 --- a/app/test/test_fib.c +++ b/app/test/test_fib.c @@ -415,5 +415,5 @@ test_slow_fib(void) return unit_test_suite_runner(&fib_slow_tests); } -REGISTER_TEST_COMMAND(fib_autotest, test_fib); -REGISTER_TEST_COMMAND(fib_slow_autotest, test_slow_fib); +REGISTER_FAST_TEST(fib_autotest, true, true, test_fib); +REGISTER_PERF_TEST(fib_slow_autotest, test_slow_fib); diff --git a/app/test/test_fib6.c b/app/test/test_fib6.c index 15ad09178ae..2f836238fbf 100644 --- a/app/test/test_fib6.c +++ b/app/test/test_fib6.c @@ -424,5 +424,5 @@ test_slow_fib6(void) return unit_test_suite_runner(&fib6_slow_tests); } -REGISTER_TEST_COMMAND(fib6_autotest, test_fib6); -REGISTER_TEST_COMMAND(fib6_slow_autotest, test_slow_fib6); +REGISTER_FAST_TEST(fib6_autotest, true, true, test_fib6); +REGISTER_PERF_TEST(fib6_slow_autotest, test_slow_fib6); diff --git a/app/test/test_fib6_perf.c b/app/test/test_fib6_perf.c index add20c2331b..a7abc46af91 100644 --- a/app/test/test_fib6_perf.c +++ b/app/test/test_fib6_perf.c @@ -156,4 +156,4 @@ test_fib6_perf(void) return 0; } -REGISTER_TEST_COMMAND(fib6_perf_autotest, test_fib6_perf); +REGISTER_PERF_TEST(fib6_perf_autotest, test_fib6_perf); diff --git a/app/test/test_fib_perf.c b/app/test/test_fib_perf.c index b56293e64f4..a9119c1bb00 100644 --- a/app/test/test_fib_perf.c +++ b/app/test/test_fib_perf.c @@ -409,4 +409,4 @@ test_fib_perf(void) return 0; } -REGISTER_TEST_COMMAND(fib_perf_autotest, test_fib_perf); +REGISTER_PERF_TEST(fib_perf_autotest, test_fib_perf); diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c index ae9de6f93d0..9296de23b7d 100644 --- a/app/test/test_func_reentrancy.c +++ b/app/test/test_func_reentrancy.c @@ -507,4 +507,4 @@ test_func_reentrancy(void) return 0; } -REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy); +REGISTER_FAST_TEST(func_reentrancy_autotest, false, true, test_func_reentrancy); diff --git a/app/test/test_graph.c b/app/test/test_graph.c index af90ac07ec5..3dd017ebfbb 100644 --- a/app/test/test_graph.c +++ b/app/test/test_graph.c @@ -740,13 +740,13 @@ test_graph_model_mcore_dispatch_core_bind_unbind(void) ret = rte_graph_worker_model_set(RTE_GRAPH_MODEL_MCORE_DISPATCH); if (ret != 0) { printf("Set graph mcore dispatch model failed\n"); - ret = -1; + goto fail; } ret = rte_graph_model_mcore_dispatch_core_bind(cloned_graph_id, worker_lcore); if (ret != 0) { printf("bind graph %d to lcore %u failed\n", graph_id, worker_lcore); - ret = -1; + goto fail; } graph = rte_graph_lookup("worker0-cloned-test2"); @@ -755,6 +755,7 @@ test_graph_model_mcore_dispatch_core_bind_unbind(void) printf("bind graph 
%s(id:%d) with lcore %u failed\n", graph->name, graph->id, worker_lcore); ret = -1; + goto fail; } rte_graph_model_mcore_dispatch_core_unbind(cloned_graph_id); @@ -764,6 +765,7 @@ test_graph_model_mcore_dispatch_core_bind_unbind(void) ret = -1; } +fail: rte_graph_destroy(cloned_graph_id); return ret; @@ -781,7 +783,7 @@ test_graph_worker_model_set_get(void) ret = rte_graph_worker_model_set(RTE_GRAPH_MODEL_MCORE_DISPATCH); if (ret != 0) { printf("Set graph mcore dispatch model failed\n"); - ret = -1; + goto fail; } graph = rte_graph_lookup("worker0-cloned-test3"); @@ -790,9 +792,10 @@ test_graph_worker_model_set_get(void) ret = -1; } +fail: rte_graph_destroy(cloned_graph_id); - return 0; + return ret; } static int @@ -989,7 +992,7 @@ graph_autotest_fn(void) return unit_test_suite_runner(&graph_testsuite); } -REGISTER_TEST_COMMAND(graph_autotest, graph_autotest_fn); +REGISTER_FAST_TEST(graph_autotest, true, true, graph_autotest_fn); static int test_node_list_dump(void) @@ -1001,4 +1004,4 @@ test_node_list_dump(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(node_list_dump, test_node_list_dump); +REGISTER_FAST_TEST(node_list_dump, true, true, test_node_list_dump); diff --git a/app/test/test_graph_perf.c b/app/test/test_graph_perf.c index c5b463f700c..983735c2d9c 100644 --- a/app/test/test_graph_perf.c +++ b/app/test/test_graph_perf.c @@ -1073,4 +1073,4 @@ test_graph_perf_func(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(graph_perf_autotest, test_graph_perf_func); +REGISTER_PERF_TEST(graph_perf_autotest, test_graph_perf_func); diff --git a/app/test/test_hash.c b/app/test/test_hash.c index 3e45afaa67f..d586878a224 100644 --- a/app/test/test_hash.c +++ b/app/test/test_hash.c @@ -2264,4 +2264,4 @@ test_hash(void) return 0; } -REGISTER_TEST_COMMAND(hash_autotest, test_hash); +REGISTER_FAST_TEST(hash_autotest, true, true, test_hash); diff --git a/app/test/test_hash_functions.c b/app/test/test_hash_functions.c index 76d51b6e71a..70820d1f197 100644 --- a/app/test/test_hash_functions.c +++ b/app/test/test_hash_functions.c @@ -290,4 +290,4 @@ test_hash_functions(void) return 0; } -REGISTER_TEST_COMMAND(hash_functions_autotest, test_hash_functions); +REGISTER_PERF_TEST(hash_functions_autotest, test_hash_functions); diff --git a/app/test/test_hash_multiwriter.c b/app/test/test_hash_multiwriter.c index 0c5a8ca1860..dd5ca677b96 100644 --- a/app/test/test_hash_multiwriter.c +++ b/app/test/test_hash_multiwriter.c @@ -287,4 +287,4 @@ test_hash_multiwriter_main(void) return 0; } -REGISTER_TEST_COMMAND(hash_multiwriter_autotest, test_hash_multiwriter_main); +REGISTER_PERF_TEST(hash_multiwriter_autotest, test_hash_multiwriter_main); diff --git a/app/test/test_hash_perf.c b/app/test/test_hash_perf.c index 14a1283abaf..d66b96e5ced 100644 --- a/app/test/test_hash_perf.c +++ b/app/test/test_hash_perf.c @@ -757,4 +757,4 @@ test_hash_perf(void) return 0; } -REGISTER_TEST_COMMAND(hash_perf_autotest, test_hash_perf); +REGISTER_PERF_TEST(hash_perf_autotest, test_hash_perf); diff --git a/app/test/test_hash_readwrite.c b/app/test/test_hash_readwrite.c index 6373e62d335..74ca13912f0 100644 --- a/app/test/test_hash_readwrite.c +++ b/app/test/test_hash_readwrite.c @@ -760,5 +760,5 @@ test_hash_rw_func_main(void) return 0; } -REGISTER_TEST_COMMAND(hash_readwrite_func_autotest, test_hash_rw_func_main); -REGISTER_TEST_COMMAND(hash_readwrite_perf_autotest, test_hash_rw_perf_main); +REGISTER_FAST_TEST(hash_readwrite_func_autotest, false, true, test_hash_rw_func_main); 
+REGISTER_PERF_TEST(hash_readwrite_perf_autotest, test_hash_rw_perf_main); diff --git a/app/test/test_hash_readwrite_lf_perf.c b/app/test/test_hash_readwrite_lf_perf.c index cf86046a2f8..5d18850e199 100644 --- a/app/test/test_hash_readwrite_lf_perf.c +++ b/app/test/test_hash_readwrite_lf_perf.c @@ -1579,5 +1579,5 @@ test_hash_readwrite_lf_perf_main(void) return 0; } -REGISTER_TEST_COMMAND(hash_readwrite_lf_perf_autotest, +REGISTER_PERF_TEST(hash_readwrite_lf_perf_autotest, test_hash_readwrite_lf_perf_main); diff --git a/app/test/test_interrupts.c b/app/test/test_interrupts.c index b59ab426999..3952f9685fd 100644 --- a/app/test/test_interrupts.c +++ b/app/test/test_interrupts.c @@ -594,4 +594,4 @@ test_interrupt(void) return ret; } -REGISTER_TEST_COMMAND(interrupt_autotest, test_interrupt); +REGISTER_FAST_TEST(interrupt_autotest, true, true, test_interrupt); diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c index 402ce361c1a..8e4df220a21 100644 --- a/app/test/test_ipfrag.c +++ b/app/test/test_ipfrag.c @@ -510,4 +510,4 @@ test_ipfrag(void) } -REGISTER_TEST_COMMAND(ipfrag_autotest, test_ipfrag); +REGISTER_FAST_TEST(ipfrag_autotest, false, true, test_ipfrag); diff --git a/app/test/test_ipsec.c b/app/test/test_ipsec.c index c2a52ec3052..6cb1bac1e73 100644 --- a/app/test/test_ipsec.c +++ b/app/test/test_ipsec.c @@ -2532,4 +2532,4 @@ test_ipsec(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(ipsec_autotest, test_ipsec); +REGISTER_FAST_TEST(ipsec_autotest, true, true, test_ipsec); diff --git a/app/test/test_ipsec_perf.c b/app/test/test_ipsec_perf.c index b221b7fc320..a32a2086e98 100644 --- a/app/test/test_ipsec_perf.c +++ b/app/test/test_ipsec_perf.c @@ -631,4 +631,4 @@ test_libipsec_perf(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(ipsec_perf_autotest, test_libipsec_perf); +REGISTER_PERF_TEST(ipsec_perf_autotest, test_libipsec_perf); diff --git a/app/test/test_kvargs.c b/app/test/test_kvargs.c index b7b97a0dd9c..7a60cac4c11 100644 --- a/app/test/test_kvargs.c +++ b/app/test/test_kvargs.c @@ -292,4 +292,4 @@ test_kvargs(void) return 0; } -REGISTER_TEST_COMMAND(kvargs_autotest, test_kvargs); +REGISTER_FAST_TEST(kvargs_autotest, true, true, test_kvargs); diff --git a/app/test/test_latencystats.c b/app/test/test_latencystats.c index db06c7d5c70..c309ab194f0 100644 --- a/app/test/test_latencystats.c +++ b/app/test/test_latencystats.c @@ -216,4 +216,4 @@ static int test_latencystats(void) return unit_test_suite_runner(&latencystats_testsuite); } -REGISTER_TEST_COMMAND(latencystats_autotest, test_latencystats); +REGISTER_FAST_TEST(latencystats_autotest, true, true, test_latencystats); diff --git a/app/test/test_lcores.c b/app/test/test_lcores.c index 2c945b01368..22225a9fd3d 100644 --- a/app/test/test_lcores.c +++ b/app/test/test_lcores.c @@ -2,7 +2,6 @@ * Copyright (c) 2020 Red Hat, Inc. 
*/ -#include #include #include @@ -341,7 +340,7 @@ test_non_eal_lcores_callback(unsigned int eal_threads_count) return -1; } -static void *ctrl_thread_loop(void *arg) +static uint32_t ctrl_thread_loop(void *arg) { struct thread_context *t = arg; @@ -350,7 +349,7 @@ static void *ctrl_thread_loop(void *arg) /* Set the thread state to DONE */ t->state = Thread_DONE; - return NULL; + return 0; } static int @@ -362,8 +361,8 @@ test_ctrl_thread(void) /* Create one control thread */ t = &ctrl_thread_context; t->state = Thread_INIT; - if (rte_ctrl_thread_create((pthread_t *)&t->id, "test_ctrl_threads", - NULL, ctrl_thread_loop, t) != 0) + if (rte_thread_create_control(&t->id, "dpdk-test-ctrlt", + ctrl_thread_loop, t) != 0) return -1; /* Wait till the control thread exits. @@ -412,4 +411,4 @@ test_lcores(void) return TEST_SUCCESS; } -REGISTER_TEST_COMMAND(lcores_autotest, test_lcores); +REGISTER_FAST_TEST(lcores_autotest, true, true, test_lcores); diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c index 2f46e4c6ee7..4d715c44652 100644 --- a/app/test/test_link_bonding.c +++ b/app/test/test_link_bonding.c @@ -10,8 +10,10 @@ #include #include #include +#include #include #include + #include #include #include @@ -52,20 +54,20 @@ #define MAX_PKT_BURST (512) #define DEF_PKT_BURST (16) -#define BONDED_DEV_NAME ("net_bonding_ut") +#define BONDING_DEV_NAME ("net_bonding_ut") #define INVALID_SOCKET_ID (-1) #define INVALID_PORT_ID (-1) #define INVALID_BONDING_MODE (-1) -uint8_t slave_mac[] = {0x00, 0xFF, 0x00, 0xFF, 0x00, 0x00 }; -uint8_t bonded_mac[] = {0xAA, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF }; +uint8_t member_mac[] = {0x00, 0xFF, 0x00, 0xFF, 0x00, 0x00 }; +uint8_t bonding_mac[] = {0xAA, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF }; struct link_bonding_unittest_params { - int16_t bonded_port_id; - int16_t slave_port_ids[TEST_MAX_NUMBER_OF_PORTS]; - uint16_t bonded_slave_count; + int16_t bonding_port_id; + int16_t member_port_ids[TEST_MAX_NUMBER_OF_PORTS]; + uint16_t bonding_member_count; uint8_t bonding_mode; uint16_t nb_rx_q; @@ -73,8 +75,8 @@ struct link_bonding_unittest_params { struct rte_mempool *mbuf_pool; - struct rte_ether_addr *default_slave_mac; - struct rte_ether_addr *default_bonded_mac; + struct rte_ether_addr *default_member_mac; + struct rte_ether_addr *default_bonding_mac; /* Packet Headers */ struct rte_ether_hdr *pkt_eth_hdr; @@ -89,9 +91,9 @@ static struct rte_ipv6_hdr pkt_ipv6_hdr; static struct rte_udp_hdr pkt_udp_hdr; static struct link_bonding_unittest_params default_params = { - .bonded_port_id = -1, - .slave_port_ids = { -1 }, - .bonded_slave_count = 0, + .bonding_port_id = -1, + .member_port_ids = { -1 }, + .bonding_member_count = 0, .bonding_mode = BONDING_MODE_ROUND_ROBIN, .nb_rx_q = 1, @@ -99,8 +101,8 @@ static struct link_bonding_unittest_params default_params = { .mbuf_pool = NULL, - .default_slave_mac = (struct rte_ether_addr *)slave_mac, - .default_bonded_mac = (struct rte_ether_addr *)bonded_mac, + .default_member_mac = (struct rte_ether_addr *)member_mac, + .default_bonding_mac = (struct rte_ether_addr *)bonding_mac, .pkt_eth_hdr = NULL, .pkt_ipv4_hdr = &pkt_ipv4_hdr, @@ -202,8 +204,8 @@ configure_ethdev(uint16_t port_id, uint8_t start, uint8_t en_isr) return 0; } -static int slaves_initialized; -static int mac_slaves_initialized; +static int members_initialized; +static int mac_members_initialized; static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t cvar = PTHREAD_COND_INITIALIZER; @@ -213,7 +215,7 @@ static int test_setup(void) { int i, 
nb_mbuf_per_pool; - struct rte_ether_addr *mac_addr = (struct rte_ether_addr *)slave_mac; + struct rte_ether_addr *mac_addr = (struct rte_ether_addr *)member_mac; /* Allocate ethernet packet header with space for VLAN header */ if (test_params->pkt_eth_hdr == NULL) { @@ -235,7 +237,7 @@ test_setup(void) } /* Create / Initialize virtual eth devs */ - if (!slaves_initialized) { + if (!members_initialized) { for (i = 0; i < TEST_MAX_NUMBER_OF_PORTS; i++) { char pmd_name[RTE_ETH_NAME_MAX_LEN]; @@ -243,64 +245,64 @@ test_setup(void) snprintf(pmd_name, RTE_ETH_NAME_MAX_LEN, "eth_virt_%d", i); - test_params->slave_port_ids[i] = virtual_ethdev_create(pmd_name, + test_params->member_port_ids[i] = virtual_ethdev_create(pmd_name, mac_addr, rte_socket_id(), 1); - TEST_ASSERT(test_params->slave_port_ids[i] >= 0, + TEST_ASSERT(test_params->member_port_ids[i] >= 0, "Failed to create virtual virtual ethdev %s", pmd_name); TEST_ASSERT_SUCCESS(configure_ethdev( - test_params->slave_port_ids[i], 1, 0), + test_params->member_port_ids[i], 1, 0), "Failed to configure virtual ethdev %s", pmd_name); } - slaves_initialized = 1; + members_initialized = 1; } return 0; } static int -test_create_bonded_device(void) +test_create_bonding_device(void) { - int current_slave_count; + int current_member_count; - uint16_t slaves[RTE_MAX_ETHPORTS]; + uint16_t members[RTE_MAX_ETHPORTS]; - /* Don't try to recreate bonded device if re-running test suite*/ - if (test_params->bonded_port_id == -1) { - test_params->bonded_port_id = rte_eth_bond_create(BONDED_DEV_NAME, + /* Don't try to recreate bonding device if re-running test suite*/ + if (test_params->bonding_port_id == -1) { + test_params->bonding_port_id = rte_eth_bond_create(BONDING_DEV_NAME, test_params->bonding_mode, rte_socket_id()); - TEST_ASSERT(test_params->bonded_port_id >= 0, - "Failed to create bonded ethdev %s", BONDED_DEV_NAME); + TEST_ASSERT(test_params->bonding_port_id >= 0, + "Failed to create bonding ethdev %s", BONDING_DEV_NAME); - TEST_ASSERT_SUCCESS(configure_ethdev(test_params->bonded_port_id, 0, 0), - "Failed to configure bonded ethdev %s", BONDED_DEV_NAME); + TEST_ASSERT_SUCCESS(configure_ethdev(test_params->bonding_port_id, 0, 0), + "Failed to configure bonding ethdev %s", BONDING_DEV_NAME); } - TEST_ASSERT_SUCCESS(rte_eth_bond_mode_set(test_params->bonded_port_id, + TEST_ASSERT_SUCCESS(rte_eth_bond_mode_set(test_params->bonding_port_id, test_params->bonding_mode), "Failed to set ethdev %d to mode %d", - test_params->bonded_port_id, test_params->bonding_mode); + test_params->bonding_port_id, test_params->bonding_mode); - current_slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); + current_member_count = rte_eth_bond_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(current_slave_count, 0, - "Number of slaves %d is great than expected %d.", - current_slave_count, 0); + TEST_ASSERT_EQUAL(current_member_count, 0, + "Number of members %d is great than expected %d.", + current_member_count, 0); - current_slave_count = rte_eth_bond_active_slaves_get( - test_params->bonded_port_id, slaves, RTE_MAX_ETHPORTS); + current_member_count = rte_eth_bond_active_members_get( + test_params->bonding_port_id, members, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(current_slave_count, 0, - "Number of active slaves %d is great than expected %d.", - current_slave_count, 0); + TEST_ASSERT_EQUAL(current_member_count, 0, + "Number of active members %d is great than expected %d.", + 
current_member_count, 0); return 0; } static int -test_create_bonded_device_with_invalid_params(void) +test_create_bonding_device_with_invalid_params(void) { int port_id; @@ -309,66 +311,66 @@ test_create_bonded_device_with_invalid_params(void) /* Invalid name */ port_id = rte_eth_bond_create(NULL, test_params->bonding_mode, rte_socket_id()); - TEST_ASSERT(port_id < 0, "Created bonded device unexpectedly"); + TEST_ASSERT(port_id < 0, "Created bonding device unexpectedly"); test_params->bonding_mode = INVALID_BONDING_MODE; /* Invalid bonding mode */ - port_id = rte_eth_bond_create(BONDED_DEV_NAME, test_params->bonding_mode, + port_id = rte_eth_bond_create(BONDING_DEV_NAME, test_params->bonding_mode, rte_socket_id()); - TEST_ASSERT(port_id < 0, "Created bonded device unexpectedly."); + TEST_ASSERT(port_id < 0, "Created bonding device unexpectedly."); test_params->bonding_mode = BONDING_MODE_ROUND_ROBIN; /* Invalid socket id */ - port_id = rte_eth_bond_create(BONDED_DEV_NAME, test_params->bonding_mode, + port_id = rte_eth_bond_create(BONDING_DEV_NAME, test_params->bonding_mode, INVALID_SOCKET_ID); - TEST_ASSERT(port_id < 0, "Created bonded device unexpectedly."); + TEST_ASSERT(port_id < 0, "Created bonding device unexpectedly."); return 0; } static int -test_add_slave_to_bonded_device(void) +test_add_member_to_bonding_device(void) { - int current_slave_count; + int current_member_count; - uint16_t slaves[RTE_MAX_ETHPORTS]; + uint16_t members[RTE_MAX_ETHPORTS]; - TEST_ASSERT_SUCCESS(rte_eth_bond_slave_add(test_params->bonded_port_id, - test_params->slave_port_ids[test_params->bonded_slave_count]), - "Failed to add slave (%d) to bonded port (%d).", - test_params->slave_port_ids[test_params->bonded_slave_count], - test_params->bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_bond_member_add(test_params->bonding_port_id, + test_params->member_port_ids[test_params->bonding_member_count]), + "Failed to add member (%d) to bonding port (%d).", + test_params->member_port_ids[test_params->bonding_member_count], + test_params->bonding_port_id); - current_slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(current_slave_count, test_params->bonded_slave_count + 1, - "Number of slaves (%d) is greater than expected (%d).", - current_slave_count, test_params->bonded_slave_count + 1); + current_member_count = rte_eth_bond_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(current_member_count, test_params->bonding_member_count + 1, + "Number of members (%d) is greater than expected (%d).", + current_member_count, test_params->bonding_member_count + 1); - current_slave_count = rte_eth_bond_active_slaves_get( - test_params->bonded_port_id, slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(current_slave_count, 0, - "Number of active slaves (%d) is not as expected (%d).\n", - current_slave_count, 0); + current_member_count = rte_eth_bond_active_members_get( + test_params->bonding_port_id, members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(current_member_count, 0, + "Number of active members (%d) is not as expected (%d).\n", + current_member_count, 0); - test_params->bonded_slave_count++; + test_params->bonding_member_count++; return 0; } static int -test_add_slave_to_invalid_bonded_device(void) +test_add_member_to_invalid_bonding_device(void) { /* Invalid port ID */ - TEST_ASSERT_FAIL(rte_eth_bond_slave_add(test_params->bonded_port_id + 5, - test_params->slave_port_ids[test_params->bonded_slave_count]), + 
TEST_ASSERT_FAIL(rte_eth_bond_member_add(test_params->bonding_port_id + 5, + test_params->member_port_ids[test_params->bonding_member_count]), "Expected call to failed as invalid port specified."); - /* Non bonded device */ - TEST_ASSERT_FAIL(rte_eth_bond_slave_add(test_params->slave_port_ids[0], - test_params->slave_port_ids[test_params->bonded_slave_count]), + /* Non bonding device */ + TEST_ASSERT_FAIL(rte_eth_bond_member_add(test_params->member_port_ids[0], + test_params->member_port_ids[test_params->bonding_member_count]), "Expected call to failed as invalid port specified."); return 0; @@ -376,287 +378,289 @@ test_add_slave_to_invalid_bonded_device(void) static int -test_remove_slave_from_bonded_device(void) +test_remove_member_from_bonding_device(void) { - int current_slave_count; + int current_member_count; struct rte_ether_addr read_mac_addr, *mac_addr; - uint16_t slaves[RTE_MAX_ETHPORTS]; + uint16_t members[RTE_MAX_ETHPORTS]; - TEST_ASSERT_SUCCESS(rte_eth_bond_slave_remove(test_params->bonded_port_id, - test_params->slave_port_ids[test_params->bonded_slave_count-1]), - "Failed to remove slave %d from bonded port (%d).", - test_params->slave_port_ids[test_params->bonded_slave_count-1], - test_params->bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_bond_member_remove(test_params->bonding_port_id, + test_params->member_port_ids[test_params->bonding_member_count-1]), + "Failed to remove member %d from bonding port (%d).", + test_params->member_port_ids[test_params->bonding_member_count-1], + test_params->bonding_port_id); - current_slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); + current_member_count = rte_eth_bond_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(current_slave_count, test_params->bonded_slave_count - 1, - "Number of slaves (%d) is great than expected (%d).\n", - current_slave_count, test_params->bonded_slave_count - 1); + TEST_ASSERT_EQUAL(current_member_count, test_params->bonding_member_count - 1, + "Number of members (%d) is great than expected (%d).\n", + current_member_count, test_params->bonding_member_count - 1); - mac_addr = (struct rte_ether_addr *)slave_mac; + mac_addr = (struct rte_ether_addr *)member_mac; mac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] = - test_params->bonded_slave_count-1; + test_params->bonding_member_count-1; TEST_ASSERT_SUCCESS(rte_eth_macaddr_get( - test_params->slave_port_ids[test_params->bonded_slave_count-1], + test_params->member_port_ids[test_params->bonding_member_count-1], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[test_params->bonded_slave_count-1]); + test_params->member_port_ids[test_params->bonding_member_count-1]); TEST_ASSERT_SUCCESS(memcmp(mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port mac address not set to that of primary port\n"); + "bonding port mac address not set to that of primary port\n"); rte_eth_stats_reset( - test_params->slave_port_ids[test_params->bonded_slave_count-1]); + test_params->member_port_ids[test_params->bonding_member_count-1]); - virtual_ethdev_simulate_link_status_interrupt(test_params->bonded_port_id, + virtual_ethdev_simulate_link_status_interrupt(test_params->bonding_port_id, 0); - test_params->bonded_slave_count--; + test_params->bonding_member_count--; return 0; } static int -test_remove_slave_from_invalid_bonded_device(void) +test_remove_member_from_invalid_bonding_device(void) { /* Invalid port ID */ - 
TEST_ASSERT_FAIL(rte_eth_bond_slave_remove( - test_params->bonded_port_id + 5, - test_params->slave_port_ids[test_params->bonded_slave_count - 1]), + TEST_ASSERT_FAIL(rte_eth_bond_member_remove( + test_params->bonding_port_id + 5, + test_params->member_port_ids[test_params->bonding_member_count - 1]), "Expected call to failed as invalid port specified."); - /* Non bonded device */ - TEST_ASSERT_FAIL(rte_eth_bond_slave_remove( - test_params->slave_port_ids[0], - test_params->slave_port_ids[test_params->bonded_slave_count - 1]), + /* Non bonding device */ + TEST_ASSERT_FAIL(rte_eth_bond_member_remove( + test_params->member_port_ids[0], + test_params->member_port_ids[test_params->bonding_member_count - 1]), "Expected call to failed as invalid port specified."); return 0; } -static int bonded_id = 2; +static int bonding_id = 2; static int -test_add_already_bonded_slave_to_bonded_device(void) +test_add_already_bonding_member_to_bonding_device(void) { - int port_id, current_slave_count; - uint16_t slaves[RTE_MAX_ETHPORTS]; + int port_id, current_member_count; + uint16_t members[RTE_MAX_ETHPORTS]; char pmd_name[RTE_ETH_NAME_MAX_LEN]; - test_add_slave_to_bonded_device(); + test_add_member_to_bonding_device(); - current_slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(current_slave_count, 1, - "Number of slaves (%d) is not that expected (%d).", - current_slave_count, 1); + current_member_count = rte_eth_bond_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(current_member_count, 1, + "Number of members (%d) is not that expected (%d).", + current_member_count, 1); - snprintf(pmd_name, RTE_ETH_NAME_MAX_LEN, "%s_%d", BONDED_DEV_NAME, ++bonded_id); + snprintf(pmd_name, RTE_ETH_NAME_MAX_LEN, "%s_%d", BONDING_DEV_NAME, ++bonding_id); port_id = rte_eth_bond_create(pmd_name, test_params->bonding_mode, rte_socket_id()); - TEST_ASSERT(port_id >= 0, "Failed to create bonded device."); + TEST_ASSERT(port_id >= 0, "Failed to create bonding device."); - TEST_ASSERT(rte_eth_bond_slave_add(port_id, - test_params->slave_port_ids[test_params->bonded_slave_count - 1]) + TEST_ASSERT(rte_eth_bond_member_add(port_id, + test_params->member_port_ids[test_params->bonding_member_count - 1]) < 0, - "Added slave (%d) to bonded port (%d) unexpectedly.", - test_params->slave_port_ids[test_params->bonded_slave_count-1], + "Added member (%d) to bonding port (%d) unexpectedly.", + test_params->member_port_ids[test_params->bonding_member_count-1], port_id); - return test_remove_slave_from_bonded_device(); + return test_remove_member_from_bonding_device(); } static int -test_get_slaves_from_bonded_device(void) +test_get_members_from_bonding_device(void) { - int current_slave_count; - uint16_t slaves[RTE_MAX_ETHPORTS]; + int current_member_count; + uint16_t members[RTE_MAX_ETHPORTS]; - TEST_ASSERT_SUCCESS(test_add_slave_to_bonded_device(), - "Failed to add slave to bonded device"); + TEST_ASSERT_SUCCESS(test_add_member_to_bonding_device(), + "Failed to add member to bonding device"); /* Invalid port id */ - current_slave_count = rte_eth_bond_slaves_get(INVALID_PORT_ID, slaves, + current_member_count = rte_eth_bond_members_get(INVALID_PORT_ID, members, RTE_MAX_ETHPORTS); - TEST_ASSERT(current_slave_count < 0, + TEST_ASSERT(current_member_count < 0, "Invalid port id unexpectedly succeeded"); - current_slave_count = rte_eth_bond_active_slaves_get(INVALID_PORT_ID, - slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT(current_slave_count < 0, 
+ current_member_count = rte_eth_bond_active_members_get(INVALID_PORT_ID, + members, RTE_MAX_ETHPORTS); + TEST_ASSERT(current_member_count < 0, "Invalid port id unexpectedly succeeded"); - /* Invalid slaves pointer */ - current_slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, + /* Invalid members pointer */ + current_member_count = rte_eth_bond_members_get(test_params->bonding_port_id, NULL, RTE_MAX_ETHPORTS); - TEST_ASSERT(current_slave_count < 0, - "Invalid slave array unexpectedly succeeded"); - - current_slave_count = rte_eth_bond_active_slaves_get( - test_params->bonded_port_id, NULL, RTE_MAX_ETHPORTS); - TEST_ASSERT(current_slave_count < 0, - "Invalid slave array unexpectedly succeeded"); - - /* non bonded device*/ - current_slave_count = rte_eth_bond_slaves_get( - test_params->slave_port_ids[0], NULL, RTE_MAX_ETHPORTS); - TEST_ASSERT(current_slave_count < 0, + TEST_ASSERT(current_member_count < 0, + "Invalid member array unexpectedly succeeded"); + + current_member_count = rte_eth_bond_active_members_get( + test_params->bonding_port_id, NULL, RTE_MAX_ETHPORTS); + TEST_ASSERT(current_member_count < 0, + "Invalid member array unexpectedly succeeded"); + + /* non bonding device*/ + current_member_count = rte_eth_bond_members_get( + test_params->member_port_ids[0], NULL, RTE_MAX_ETHPORTS); + TEST_ASSERT(current_member_count < 0, "Invalid port id unexpectedly succeeded"); - current_slave_count = rte_eth_bond_active_slaves_get( - test_params->slave_port_ids[0], NULL, RTE_MAX_ETHPORTS); - TEST_ASSERT(current_slave_count < 0, + current_member_count = rte_eth_bond_active_members_get( + test_params->member_port_ids[0], NULL, RTE_MAX_ETHPORTS); + TEST_ASSERT(current_member_count < 0, "Invalid port id unexpectedly succeeded"); - TEST_ASSERT_SUCCESS(test_remove_slave_from_bonded_device(), - "Failed to remove slaves from bonded device"); + TEST_ASSERT_SUCCESS(test_remove_member_from_bonding_device(), + "Failed to remove members from bonding device"); return 0; } static int -test_add_remove_multiple_slaves_to_from_bonded_device(void) +test_add_remove_multiple_members_to_from_bonding_device(void) { int i; for (i = 0; i < TEST_MAX_NUMBER_OF_PORTS; i++) - TEST_ASSERT_SUCCESS(test_add_slave_to_bonded_device(), - "Failed to add slave to bonded device"); + TEST_ASSERT_SUCCESS(test_add_member_to_bonding_device(), + "Failed to add member to bonding device"); for (i = 0; i < TEST_MAX_NUMBER_OF_PORTS; i++) - TEST_ASSERT_SUCCESS(test_remove_slave_from_bonded_device(), - "Failed to remove slaves from bonded device"); + TEST_ASSERT_SUCCESS(test_remove_member_from_bonding_device(), + "Failed to remove members from bonding device"); return 0; } static void -enable_bonded_slaves(void) +enable_bonding_members(void) { int i; - for (i = 0; i < test_params->bonded_slave_count; i++) { - virtual_ethdev_tx_burst_fn_set_success(test_params->slave_port_ids[i], + for (i = 0; i < test_params->bonding_member_count; i++) { + virtual_ethdev_tx_burst_fn_set_success(test_params->member_port_ids[i], 1); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[i], 1); + test_params->member_port_ids[i], 1); } } static int -test_start_bonded_device(void) +test_start_bonding_device(void) { struct rte_eth_link link_status; - int current_slave_count, current_bonding_mode, primary_port; - uint16_t slaves[RTE_MAX_ETHPORTS]; + int current_member_count, current_bonding_mode, primary_port; + uint16_t members[RTE_MAX_ETHPORTS]; int retval; - /* Add slave to bonded device*/ - 
TEST_ASSERT_SUCCESS(test_add_slave_to_bonded_device(), - "Failed to add slave to bonded device"); + /* Add member to bonding device*/ + TEST_ASSERT_SUCCESS(test_add_member_to_bonding_device(), + "Failed to add member to bonding device"); - TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonded_port_id), - "Failed to start bonded pmd eth device %d.", - test_params->bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonding_port_id), + "Failed to start bonding pmd eth device %d.", + test_params->bonding_port_id); - /* Change link status of virtual pmd so it will be added to the active - * slave list of the bonded device*/ + /* + * Change link status of virtual pmd so it will be added to the active + * member list of the bonding device. + */ virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[test_params->bonded_slave_count-1], 1); + test_params->member_port_ids[test_params->bonding_member_count-1], 1); - current_slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(current_slave_count, test_params->bonded_slave_count, - "Number of slaves (%d) is not expected value (%d).", - current_slave_count, test_params->bonded_slave_count); + current_member_count = rte_eth_bond_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(current_member_count, test_params->bonding_member_count, + "Number of members (%d) is not expected value (%d).", + current_member_count, test_params->bonding_member_count); - current_slave_count = rte_eth_bond_active_slaves_get( - test_params->bonded_port_id, slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(current_slave_count, test_params->bonded_slave_count, - "Number of active slaves (%d) is not expected value (%d).", - current_slave_count, test_params->bonded_slave_count); + current_member_count = rte_eth_bond_active_members_get( + test_params->bonding_port_id, members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(current_member_count, test_params->bonding_member_count, + "Number of active members (%d) is not expected value (%d).", + current_member_count, test_params->bonding_member_count); - current_bonding_mode = rte_eth_bond_mode_get(test_params->bonded_port_id); + current_bonding_mode = rte_eth_bond_mode_get(test_params->bonding_port_id); TEST_ASSERT_EQUAL(current_bonding_mode, test_params->bonding_mode, - "Bonded device mode (%d) is not expected value (%d).\n", + "Bonding device mode (%d) is not expected value (%d).\n", current_bonding_mode, test_params->bonding_mode); - primary_port = rte_eth_bond_primary_get(test_params->bonded_port_id); - TEST_ASSERT_EQUAL(primary_port, test_params->slave_port_ids[0], + primary_port = rte_eth_bond_primary_get(test_params->bonding_port_id); + TEST_ASSERT_EQUAL(primary_port, test_params->member_port_ids[0], "Primary port (%d) is not expected value (%d).", - primary_port, test_params->slave_port_ids[0]); + primary_port, test_params->member_port_ids[0]); - retval = rte_eth_link_get(test_params->bonded_port_id, &link_status); + retval = rte_eth_link_get(test_params->bonding_port_id, &link_status); TEST_ASSERT(retval >= 0, - "Bonded port (%d) link get failed: %s\n", - test_params->bonded_port_id, rte_strerror(-retval)); + "Bonding port (%d) link get failed: %s\n", + test_params->bonding_port_id, rte_strerror(-retval)); TEST_ASSERT_EQUAL(link_status.link_status, 1, - "Bonded port (%d) status (%d) is not expected value (%d).\n", - test_params->bonded_port_id, link_status.link_status, 1); + "Bonding port 
(%d) status (%d) is not expected value (%d).\n", + test_params->bonding_port_id, link_status.link_status, 1); return 0; } static int -test_stop_bonded_device(void) +test_stop_bonding_device(void) { - int current_slave_count; - uint16_t slaves[RTE_MAX_ETHPORTS]; + int current_member_count; + uint16_t members[RTE_MAX_ETHPORTS]; struct rte_eth_link link_status; int retval; - TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonded_port_id), - "Failed to stop bonded port %u", - test_params->bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonding_port_id), + "Failed to stop bonding port %u", + test_params->bonding_port_id); - retval = rte_eth_link_get(test_params->bonded_port_id, &link_status); + retval = rte_eth_link_get(test_params->bonding_port_id, &link_status); TEST_ASSERT(retval >= 0, - "Bonded port (%d) link get failed: %s\n", - test_params->bonded_port_id, rte_strerror(-retval)); + "Bonding port (%d) link get failed: %s\n", + test_params->bonding_port_id, rte_strerror(-retval)); TEST_ASSERT_EQUAL(link_status.link_status, 0, - "Bonded port (%d) status (%d) is not expected value (%d).", - test_params->bonded_port_id, link_status.link_status, 0); + "Bonding port (%d) status (%d) is not expected value (%d).", + test_params->bonding_port_id, link_status.link_status, 0); - current_slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(current_slave_count, test_params->bonded_slave_count, - "Number of slaves (%d) is not expected value (%d).", - current_slave_count, test_params->bonded_slave_count); + current_member_count = rte_eth_bond_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(current_member_count, test_params->bonding_member_count, + "Number of members (%d) is not expected value (%d).", + current_member_count, test_params->bonding_member_count); - current_slave_count = rte_eth_bond_active_slaves_get( - test_params->bonded_port_id, slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(current_slave_count, 0, - "Number of active slaves (%d) is not expected value (%d).", - current_slave_count, 0); + current_member_count = rte_eth_bond_active_members_get( + test_params->bonding_port_id, members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(current_member_count, 0, + "Number of active members (%d) is not expected value (%d).", + current_member_count, 0); return 0; } static int -remove_slaves_and_stop_bonded_device(void) +remove_members_and_stop_bonding_device(void) { - /* Clean up and remove slaves from bonded device */ + /* Clean up and remove members from bonding device */ free_virtualpmd_tx_queue(); - while (test_params->bonded_slave_count > 0) - TEST_ASSERT_SUCCESS(test_remove_slave_from_bonded_device(), - "test_remove_slave_from_bonded_device failed"); + while (test_params->bonding_member_count > 0) + TEST_ASSERT_SUCCESS(test_remove_member_from_bonding_device(), + "test_remove_member_from_bonding_device failed"); - TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonded_port_id), - "Failed to stop bonded port %u", - test_params->bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonding_port_id), + "Failed to stop bonding port %u", + test_params->bonding_port_id); - rte_eth_stats_reset(test_params->bonded_port_id); - rte_eth_bond_mac_address_reset(test_params->bonded_port_id); + rte_eth_stats_reset(test_params->bonding_port_id); + rte_eth_bond_mac_address_reset(test_params->bonding_port_id); return 0; } @@ -680,21 +684,21 @@ test_set_bonding_mode(void) 
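The following hunk updates test_set_bonding_mode(). As background, a minimal sketch (assuming only the renamed API shown elsewhere in this patch; the helper name is hypothetical) of the rte_eth_bond_mode_set()/rte_eth_bond_mode_get() round trip that the test exercises; both calls return a negative value when the port id does not refer to a bonding device.

#include <rte_eth_bond.h>

/* Set a bonding mode and read it back to confirm it was applied. */
static int
set_and_check_mode(uint16_t bonding_port_id, uint8_t mode)
{
	int ret;

	ret = rte_eth_bond_mode_set(bonding_port_id, mode);
	if (ret < 0)
		return ret;	/* invalid or non-bonding port id */

	ret = rte_eth_bond_mode_get(bonding_port_id);
	if (ret < 0)
		return ret;

	return (ret == mode) ? 0 : -1;
}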
"Expected call to failed as invalid port (%d) specified.", INVALID_PORT_ID); - /* Non bonded device */ - TEST_ASSERT_FAIL(rte_eth_bond_mode_set(test_params->slave_port_ids[0], + /* Non bonding device */ + TEST_ASSERT_FAIL(rte_eth_bond_mode_set(test_params->member_port_ids[0], bonding_modes[i]), "Expected call to failed as invalid port (%d) specified.", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_bond_mode_set(test_params->bonded_port_id, + TEST_ASSERT_SUCCESS(rte_eth_bond_mode_set(test_params->bonding_port_id, bonding_modes[i]), "Failed to set link bonding mode on port (%d) to (%d).", - test_params->bonded_port_id, bonding_modes[i]); + test_params->bonding_port_id, bonding_modes[i]); - bonding_mode = rte_eth_bond_mode_get(test_params->bonded_port_id); + bonding_mode = rte_eth_bond_mode_get(test_params->bonding_port_id); TEST_ASSERT_EQUAL(bonding_mode, bonding_modes[i], "Link bonding mode (%d) of port (%d) is not expected value (%d).", - bonding_mode, test_params->bonded_port_id, + bonding_mode, test_params->bonding_port_id, bonding_modes[i]); /* Invalid port ID */ @@ -703,102 +707,105 @@ test_set_bonding_mode(void) "Expected call to failed as invalid port (%d) specified.", INVALID_PORT_ID); - /* Non bonded device */ - bonding_mode = rte_eth_bond_mode_get(test_params->slave_port_ids[0]); + /* Non bonding device */ + bonding_mode = rte_eth_bond_mode_get(test_params->member_port_ids[0]); TEST_ASSERT(bonding_mode < 0, "Expected call to failed as invalid port (%d) specified.", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); } - return remove_slaves_and_stop_bonded_device(); + return remove_members_and_stop_bonding_device(); } static int -test_set_primary_slave(void) +test_set_primary_member(void) { int i, j, retval; struct rte_ether_addr read_mac_addr; struct rte_ether_addr *expected_mac_addr; - /* Add 4 slaves to bonded device */ - for (i = test_params->bonded_slave_count; i < 4; i++) - TEST_ASSERT_SUCCESS(test_add_slave_to_bonded_device(), - "Failed to add slave to bonded device."); + /* Add 4 members to bonding device */ + for (i = test_params->bonding_member_count; i < 4; i++) + TEST_ASSERT_SUCCESS(test_add_member_to_bonding_device(), + "Failed to add member to bonding device."); - TEST_ASSERT_SUCCESS(rte_eth_bond_mode_set(test_params->bonded_port_id, + TEST_ASSERT_SUCCESS(rte_eth_bond_mode_set(test_params->bonding_port_id, BONDING_MODE_ROUND_ROBIN), "Failed to set link bonding mode on port (%d) to (%d).", - test_params->bonded_port_id, BONDING_MODE_ROUND_ROBIN); + test_params->bonding_port_id, BONDING_MODE_ROUND_ROBIN); /* Invalid port ID */ TEST_ASSERT_FAIL(rte_eth_bond_primary_set(INVALID_PORT_ID, - test_params->slave_port_ids[i]), + test_params->member_port_ids[i]), "Expected call to failed as invalid port specified."); - /* Non bonded device */ - TEST_ASSERT_FAIL(rte_eth_bond_primary_set(test_params->slave_port_ids[i], - test_params->slave_port_ids[i]), + /* Non bonding device */ + TEST_ASSERT_FAIL(rte_eth_bond_primary_set(test_params->member_port_ids[i], + test_params->member_port_ids[i]), "Expected call to failed as invalid port specified."); - /* Set slave as primary - * Verify slave it is now primary slave - * Verify that MAC address of bonded device is that of primary slave - * Verify that MAC address of all bonded slaves are that of primary slave + /* Set member as primary + * Verify member it is now primary member + * Verify that MAC address of bonding device is that of primary member + * Verify that 
MAC address of all bonding members are that of primary member */ for (i = 0; i < 4; i++) { - TEST_ASSERT_SUCCESS(rte_eth_bond_primary_set(test_params->bonded_port_id, - test_params->slave_port_ids[i]), - "Failed to set bonded port (%d) primary port to (%d)", - test_params->bonded_port_id, test_params->slave_port_ids[i]); + TEST_ASSERT_SUCCESS(rte_eth_bond_primary_set(test_params->bonding_port_id, + test_params->member_port_ids[i]), + "Failed to set bonding port (%d) primary port to (%d)", + test_params->bonding_port_id, test_params->member_port_ids[i]); - retval = rte_eth_bond_primary_get(test_params->bonded_port_id); + retval = rte_eth_bond_primary_get(test_params->bonding_port_id); TEST_ASSERT(retval >= 0, - "Failed to read primary port from bonded port (%d)\n", - test_params->bonded_port_id); + "Failed to read primary port from bonding port (%d)\n", + test_params->bonding_port_id); - TEST_ASSERT_EQUAL(retval, test_params->slave_port_ids[i], - "Bonded port (%d) primary port (%d) not expected value (%d)\n", - test_params->bonded_port_id, retval, - test_params->slave_port_ids[i]); + TEST_ASSERT_EQUAL(retval, test_params->member_port_ids[i], + "Bonding port (%d) primary port (%d) not expected value (%d)\n", + test_params->bonding_port_id, retval, + test_params->member_port_ids[i]); - /* stop/start bonded eth dev to apply new MAC */ - TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonded_port_id), - "Failed to stop bonded port %u", - test_params->bonded_port_id); + /* stop/start bonding eth dev to apply new MAC */ + TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonding_port_id), + "Failed to stop bonding port %u", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonded_port_id), - "Failed to start bonded port %d", - test_params->bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonding_port_id), + "Failed to start bonding port %d", + test_params->bonding_port_id); - expected_mac_addr = (struct rte_ether_addr *)&slave_mac; + expected_mac_addr = (struct rte_ether_addr *)&member_mac; expected_mac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] = i; - /* Check primary slave MAC */ - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[i], &read_mac_addr), + /* Check primary member MAC */ + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[i], + &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); TEST_ASSERT_SUCCESS(memcmp(expected_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port mac address not set to that of primary port\n"); + "bonding port mac address not set to that of primary port\n"); - /* Check bonded MAC */ - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + /* Check bonding MAC */ + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, + &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); + test_params->bonding_port_id); TEST_ASSERT_SUCCESS(memcmp(&read_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port mac address not set to that of primary port\n"); + "bonding port mac address not set to that of primary port\n"); - /* Check other slaves MACs */ + /* Check other members MACs */ for (j = 0; j < 4; j++) { if (j != i) { - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[j], + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get( + test_params->member_port_ids[j], &read_mac_addr), "Failed to get mac 
address (port %d)", - test_params->slave_port_ids[j]); + test_params->member_port_ids[j]); TEST_ASSERT_SUCCESS(memcmp(expected_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "slave port mac address not set to that of primary " + "member port mac address not set to that of primary " "port"); } } @@ -806,382 +813,383 @@ test_set_primary_slave(void) /* Test with none existent port */ - TEST_ASSERT_FAIL(rte_eth_bond_primary_get(test_params->bonded_port_id + 10), + TEST_ASSERT_FAIL(rte_eth_bond_primary_get(test_params->bonding_port_id + 10), "read primary port from expectedly"); - /* Test with slave port */ - TEST_ASSERT_FAIL(rte_eth_bond_primary_get(test_params->slave_port_ids[0]), + /* Test with member port */ + TEST_ASSERT_FAIL(rte_eth_bond_primary_get(test_params->member_port_ids[0]), "read primary port from expectedly\n"); - TEST_ASSERT_SUCCESS(remove_slaves_and_stop_bonded_device(), - "Failed to stop and remove slaves from bonded device"); + TEST_ASSERT_SUCCESS(remove_members_and_stop_bonding_device(), + "Failed to stop and remove members from bonding device"); - /* No slaves */ - TEST_ASSERT(rte_eth_bond_primary_get(test_params->bonded_port_id) < 0, + /* No members */ + TEST_ASSERT(rte_eth_bond_primary_get(test_params->bonding_port_id) < 0, "read primary port from expectedly\n"); return 0; } static int -test_set_explicit_bonded_mac(void) +test_set_explicit_bonding_mac(void) { int i; struct rte_ether_addr read_mac_addr; struct rte_ether_addr *mac_addr; - uint8_t explicit_bonded_mac[] = { 0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x01 }; + uint8_t explicit_bonding_mac[] = { 0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x01 }; - mac_addr = (struct rte_ether_addr *)explicit_bonded_mac; + mac_addr = (struct rte_ether_addr *)explicit_bonding_mac; /* Invalid port ID */ TEST_ASSERT_FAIL(rte_eth_bond_mac_address_set(INVALID_PORT_ID, mac_addr), "Expected call to failed as invalid port specified."); - /* Non bonded device */ + /* Non bonding device */ TEST_ASSERT_FAIL(rte_eth_bond_mac_address_set( - test_params->slave_port_ids[0], mac_addr), + test_params->member_port_ids[0], mac_addr), "Expected call to failed as invalid port specified."); /* NULL MAC address */ TEST_ASSERT_FAIL(rte_eth_bond_mac_address_set( - test_params->bonded_port_id, NULL), + test_params->bonding_port_id, NULL), "Expected call to failed as NULL MAC specified"); TEST_ASSERT_SUCCESS(rte_eth_bond_mac_address_set( - test_params->bonded_port_id, mac_addr), - "Failed to set MAC address on bonded port (%d)", - test_params->bonded_port_id); - - /* Add 4 slaves to bonded device */ - for (i = test_params->bonded_slave_count; i < 4; i++) { - TEST_ASSERT_SUCCESS(test_add_slave_to_bonded_device(), - "Failed to add slave to bonded device.\n"); + test_params->bonding_port_id, mac_addr), + "Failed to set MAC address on bonding port (%d)", + test_params->bonding_port_id); + + /* Add 4 members to bonding device */ + for (i = test_params->bonding_member_count; i < 4; i++) { + TEST_ASSERT_SUCCESS(test_add_member_to_bonding_device(), + "Failed to add member to bonding device.\n"); } - /* Check bonded MAC */ - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + /* Check bonding MAC */ + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); + test_params->bonding_port_id); TEST_ASSERT_SUCCESS(memcmp(mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port mac address not set to that of primary port"); + "bonding port mac 
address not set to that of primary port"); - /* Check other slaves MACs */ + /* Check other members MACs */ for (i = 0; i < 4; i++) { - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[i], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[i], + &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); TEST_ASSERT_SUCCESS(memcmp(mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "slave port mac address not set to that of primary port"); + "member port mac address not set to that of primary port"); } - /* test resetting mac address on bonded device */ + /* test resetting mac address on bonding device */ TEST_ASSERT_SUCCESS( - rte_eth_bond_mac_address_reset(test_params->bonded_port_id), - "Failed to reset MAC address on bonded port (%d)", - test_params->bonded_port_id); + rte_eth_bond_mac_address_reset(test_params->bonding_port_id), + "Failed to reset MAC address on bonding port (%d)", + test_params->bonding_port_id); TEST_ASSERT_FAIL( - rte_eth_bond_mac_address_reset(test_params->slave_port_ids[0]), - "Reset MAC address on bonded port (%d) unexpectedly", - test_params->slave_port_ids[1]); + rte_eth_bond_mac_address_reset(test_params->member_port_ids[0]), + "Reset MAC address on bonding port (%d) unexpectedly", + test_params->member_port_ids[1]); - /* test resetting mac address on bonded device with no slaves */ - TEST_ASSERT_SUCCESS(remove_slaves_and_stop_bonded_device(), - "Failed to remove slaves and stop bonded device"); + /* test resetting mac address on bonding device with no members */ + TEST_ASSERT_SUCCESS(remove_members_and_stop_bonding_device(), + "Failed to remove members and stop bonding device"); - TEST_ASSERT_SUCCESS(rte_eth_bond_mac_address_reset(test_params->bonded_port_id), - "Failed to reset MAC address on bonded port (%d)", - test_params->bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_bond_mac_address_reset(test_params->bonding_port_id), + "Failed to reset MAC address on bonding port (%d)", + test_params->bonding_port_id); return 0; } -#define BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT (3) +#define BONDING_INIT_MAC_ASSIGNMENT_MEMBER_COUNT (3) static int -test_set_bonded_port_initialization_mac_assignment(void) +test_set_bonding_port_initialization_mac_assignment(void) { - int i, slave_count; + int i, member_count; - uint16_t slaves[RTE_MAX_ETHPORTS]; - static int bonded_port_id = -1; - static int slave_port_ids[BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT]; + uint16_t members[RTE_MAX_ETHPORTS]; + static int bonding_port_id = -1; + static int member_port_ids[BONDING_INIT_MAC_ASSIGNMENT_MEMBER_COUNT]; - struct rte_ether_addr slave_mac_addr, bonded_mac_addr, read_mac_addr; + struct rte_ether_addr member_mac_addr, bonding_mac_addr, read_mac_addr; /* Initialize default values for MAC addresses */ - memcpy(&slave_mac_addr, slave_mac, sizeof(struct rte_ether_addr)); - memcpy(&bonded_mac_addr, slave_mac, sizeof(struct rte_ether_addr)); + memcpy(&member_mac_addr, member_mac, sizeof(struct rte_ether_addr)); + memcpy(&bonding_mac_addr, member_mac, sizeof(struct rte_ether_addr)); /* - * 1. a - Create / configure bonded / slave ethdevs + * 1. 
a - Create / configure bonding / member ethdevs */ - if (bonded_port_id == -1) { - bonded_port_id = rte_eth_bond_create("net_bonding_mac_ass_test", + if (bonding_port_id == -1) { + bonding_port_id = rte_eth_bond_create("net_bonding_mac_ass_test", BONDING_MODE_ACTIVE_BACKUP, rte_socket_id()); - TEST_ASSERT(bonded_port_id > 0, "failed to create bonded device"); + TEST_ASSERT(bonding_port_id > 0, "failed to create bonding device"); - TEST_ASSERT_SUCCESS(configure_ethdev(bonded_port_id, 0, 0), - "Failed to configure bonded ethdev"); + TEST_ASSERT_SUCCESS(configure_ethdev(bonding_port_id, 0, 0), + "Failed to configure bonding ethdev"); } - if (!mac_slaves_initialized) { - for (i = 0; i < BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT; i++) { + if (!mac_members_initialized) { + for (i = 0; i < BONDING_INIT_MAC_ASSIGNMENT_MEMBER_COUNT; i++) { char pmd_name[RTE_ETH_NAME_MAX_LEN]; - slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = + member_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = i + 100; snprintf(pmd_name, RTE_ETH_NAME_MAX_LEN, - "eth_slave_%d", i); + "eth_member_%d", i); - slave_port_ids[i] = virtual_ethdev_create(pmd_name, - &slave_mac_addr, rte_socket_id(), 1); + member_port_ids[i] = virtual_ethdev_create(pmd_name, + &member_mac_addr, rte_socket_id(), 1); - TEST_ASSERT(slave_port_ids[i] >= 0, - "Failed to create slave ethdev %s", + TEST_ASSERT(member_port_ids[i] >= 0, + "Failed to create member ethdev %s", pmd_name); - TEST_ASSERT_SUCCESS(configure_ethdev(slave_port_ids[i], 1, 0), + TEST_ASSERT_SUCCESS(configure_ethdev(member_port_ids[i], 1, 0), "Failed to configure virtual ethdev %s", pmd_name); } - mac_slaves_initialized = 1; + mac_members_initialized = 1; } /* - * 2. Add slave ethdevs to bonded device + * 2. Add member ethdevs to bonding device */ - for (i = 0; i < BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT; i++) { - TEST_ASSERT_SUCCESS(rte_eth_bond_slave_add(bonded_port_id, - slave_port_ids[i]), - "Failed to add slave (%d) to bonded port (%d).", - slave_port_ids[i], bonded_port_id); + for (i = 0; i < BONDING_INIT_MAC_ASSIGNMENT_MEMBER_COUNT; i++) { + TEST_ASSERT_SUCCESS(rte_eth_bond_member_add(bonding_port_id, + member_port_ids[i]), + "Failed to add member (%d) to bonding port (%d).", + member_port_ids[i], bonding_port_id); } - slave_count = rte_eth_bond_slaves_get(bonded_port_id, slaves, + member_count = rte_eth_bond_members_get(bonding_port_id, members, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT, slave_count, - "Number of slaves (%d) is not as expected (%d)", - slave_count, BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT); + TEST_ASSERT_EQUAL(BONDING_INIT_MAC_ASSIGNMENT_MEMBER_COUNT, member_count, + "Number of members (%d) is not as expected (%d)", + member_count, BONDING_INIT_MAC_ASSIGNMENT_MEMBER_COUNT); /* - * 3. Set explicit MAC address on bonded ethdev + * 3. Set explicit MAC address on bonding ethdev */ - bonded_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-2] = 0xFF; - bonded_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0xAA; + bonding_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-2] = 0xFF; + bonding_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0xAA; TEST_ASSERT_SUCCESS(rte_eth_bond_mac_address_set( - bonded_port_id, &bonded_mac_addr), - "Failed to set MAC address on bonded port (%d)", - bonded_port_id); + bonding_port_id, &bonding_mac_addr), + "Failed to set MAC address on bonding port (%d)", + bonding_port_id); - /* 4. a - Start bonded ethdev - * b - Enable slave devices - * c - Verify bonded/slaves ethdev MAC addresses + /* 4. 
a - Start bonding ethdev + * b - Enable member devices + * c - Verify bonding/members ethdev MAC addresses */ - TEST_ASSERT_SUCCESS(rte_eth_dev_start(bonded_port_id), - "Failed to start bonded pmd eth device %d.", - bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_start(bonding_port_id), + "Failed to start bonding pmd eth device %d.", + bonding_port_id); - for (i = 0; i < BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT; i++) { + for (i = 0; i < BONDING_INIT_MAC_ASSIGNMENT_MEMBER_COUNT; i++) { virtual_ethdev_simulate_link_status_interrupt( - slave_port_ids[i], 1); + member_port_ids[i], 1); } - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - bonded_port_id); - TEST_ASSERT_SUCCESS(memcmp(&bonded_mac_addr, &read_mac_addr, + bonding_port_id); + TEST_ASSERT_SUCCESS(memcmp(&bonding_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port mac address not as expected"); + "bonding port mac address not as expected"); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(slave_port_ids[0], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - slave_port_ids[0]); - TEST_ASSERT_SUCCESS(memcmp(&bonded_mac_addr, &read_mac_addr, + member_port_ids[0]); + TEST_ASSERT_SUCCESS(memcmp(&bonding_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "slave port 0 mac address not as expected"); + "member port 0 mac address not as expected"); - slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100; - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(slave_port_ids[1], &read_mac_addr), + member_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100; + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - slave_port_ids[1]); - TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr, + member_port_ids[1]); + TEST_ASSERT_SUCCESS(memcmp(&member_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "slave port 1 mac address not as expected"); + "member port 1 mac address not as expected"); - slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 2 + 100; - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(slave_port_ids[2], &read_mac_addr), + member_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 2 + 100; + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(member_port_ids[2], &read_mac_addr), "Failed to get mac address (port %d)", - slave_port_ids[2]); - TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr, + member_port_ids[2]); + TEST_ASSERT_SUCCESS(memcmp(&member_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "slave port 2 mac address not as expected"); + "member port 2 mac address not as expected"); /* 7. 
a - Change primary port - * b - Stop / Start bonded port - * d - Verify slave ethdev MAC addresses + * b - Stop / Start bonding port + * d - Verify member ethdev MAC addresses */ - TEST_ASSERT_SUCCESS(rte_eth_bond_primary_set(bonded_port_id, - slave_port_ids[2]), - "failed to set primary port on bonded device."); + TEST_ASSERT_SUCCESS(rte_eth_bond_primary_set(bonding_port_id, + member_port_ids[2]), + "failed to set primary port on bonding device."); - TEST_ASSERT_SUCCESS(rte_eth_dev_stop(bonded_port_id), - "Failed to stop bonded port %u", - bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_stop(bonding_port_id), + "Failed to stop bonding port %u", + bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_dev_start(bonded_port_id), - "Failed to start bonded pmd eth device %d.", - bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_start(bonding_port_id), + "Failed to start bonding pmd eth device %d.", + bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - bonded_port_id); - TEST_ASSERT_SUCCESS(memcmp(&bonded_mac_addr, &read_mac_addr, + bonding_port_id); + TEST_ASSERT_SUCCESS(memcmp(&bonding_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port mac address not as expected"); + "bonding port mac address not as expected"); - slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0 + 100; - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(slave_port_ids[0], &read_mac_addr), + member_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0 + 100; + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - slave_port_ids[0]); - TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr, + member_port_ids[0]); + TEST_ASSERT_SUCCESS(memcmp(&member_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "slave port 0 mac address not as expected"); + "member port 0 mac address not as expected"); - slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100; - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(slave_port_ids[1], &read_mac_addr), + member_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100; + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - slave_port_ids[1]); - TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr, + member_port_ids[1]); + TEST_ASSERT_SUCCESS(memcmp(&member_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "slave port 1 mac address not as expected"); + "member port 1 mac address not as expected"); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(slave_port_ids[2], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(member_port_ids[2], &read_mac_addr), "Failed to get mac address (port %d)", - slave_port_ids[2]); - TEST_ASSERT_SUCCESS(memcmp(&bonded_mac_addr, &read_mac_addr, + member_port_ids[2]); + TEST_ASSERT_SUCCESS(memcmp(&bonding_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "slave port 2 mac address not as expected"); + "member port 2 mac address not as expected"); - /* 6. a - Stop bonded ethdev - * b - remove slave ethdevs - * c - Verify slave ethdevs MACs are restored + /* 6. 
a - Stop bonding ethdev + * b - remove member ethdevs + * c - Verify member ethdevs MACs are restored */ - TEST_ASSERT_SUCCESS(rte_eth_dev_stop(bonded_port_id), - "Failed to stop bonded port %u", - bonded_port_id); - - for (i = 0; i < BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT; i++) { - TEST_ASSERT_SUCCESS(rte_eth_bond_slave_remove(bonded_port_id, - slave_port_ids[i]), - "Failed to remove slave %d from bonded port (%d).", - slave_port_ids[i], bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_stop(bonding_port_id), + "Failed to stop bonding port %u", + bonding_port_id); + + for (i = 0; i < BONDING_INIT_MAC_ASSIGNMENT_MEMBER_COUNT; i++) { + TEST_ASSERT_SUCCESS(rte_eth_bond_member_remove(bonding_port_id, + member_port_ids[i]), + "Failed to remove member %d from bonding port (%d).", + member_port_ids[i], bonding_port_id); } - slave_count = rte_eth_bond_slaves_get(bonded_port_id, slaves, + member_count = rte_eth_bond_members_get(bonding_port_id, members, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, 0, - "Number of slaves (%d) is great than expected (%d).", - slave_count, 0); + TEST_ASSERT_EQUAL(member_count, 0, + "Number of members (%d) is great than expected (%d).", + member_count, 0); - slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0 + 100; - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(slave_port_ids[0], &read_mac_addr), + member_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0 + 100; + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - slave_port_ids[0]); - TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr, + member_port_ids[0]); + TEST_ASSERT_SUCCESS(memcmp(&member_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "slave port 0 mac address not as expected"); + "member port 0 mac address not as expected"); - slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100; - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(slave_port_ids[1], &read_mac_addr), + member_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100; + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - slave_port_ids[1]); - TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr, + member_port_ids[1]); + TEST_ASSERT_SUCCESS(memcmp(&member_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "slave port 1 mac address not as expected"); + "member port 1 mac address not as expected"); - slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 2 + 100; - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(slave_port_ids[2], &read_mac_addr), + member_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 2 + 100; + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(member_port_ids[2], &read_mac_addr), "Failed to get mac address (port %d)", - slave_port_ids[2]); - TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr, + member_port_ids[2]); + TEST_ASSERT_SUCCESS(memcmp(&member_mac_addr, &read_mac_addr, sizeof(read_mac_addr)), - "slave port 2 mac address not as expected"); + "member port 2 mac address not as expected"); return 0; } static int -initialize_bonded_device_with_slaves(uint8_t bonding_mode, uint8_t bond_en_isr, - uint16_t number_of_slaves, uint8_t enable_slave) +initialize_bonding_device_with_members(uint8_t bonding_mode, uint8_t bond_en_isr, + uint16_t number_of_members, uint8_t enable_member) { - /* Configure bonded device */ - TEST_ASSERT_SUCCESS(configure_ethdev(test_params->bonded_port_id, 0, + /* Configure bonding device */ + TEST_ASSERT_SUCCESS(configure_ethdev(test_params->bonding_port_id, 0, bond_en_isr), "Failed to 
configure bonding port (%d) in mode %d " - "with (%d) slaves.", test_params->bonded_port_id, bonding_mode, - number_of_slaves); + "with (%d) members.", test_params->bonding_port_id, bonding_mode, + number_of_members); - /* Add slaves to bonded device */ - while (number_of_slaves > test_params->bonded_slave_count) - TEST_ASSERT_SUCCESS(test_add_slave_to_bonded_device(), - "Failed to add slave (%d to bonding port (%d).", - test_params->bonded_slave_count - 1, - test_params->bonded_port_id); + /* Add members to bonding device */ + while (number_of_members > test_params->bonding_member_count) + TEST_ASSERT_SUCCESS(test_add_member_to_bonding_device(), + "Failed to add member (%d to bonding port (%d).", + test_params->bonding_member_count - 1, + test_params->bonding_port_id); /* Set link bonding mode */ - TEST_ASSERT_SUCCESS(rte_eth_bond_mode_set(test_params->bonded_port_id, + TEST_ASSERT_SUCCESS(rte_eth_bond_mode_set(test_params->bonding_port_id, bonding_mode), "Failed to set link bonding mode on port (%d) to (%d).", - test_params->bonded_port_id, bonding_mode); + test_params->bonding_port_id, bonding_mode); - TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonded_port_id), - "Failed to start bonded pmd eth device %d.", - test_params->bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonding_port_id), + "Failed to start bonding pmd eth device %d.", + test_params->bonding_port_id); - if (enable_slave) - enable_bonded_slaves(); + if (enable_member) + enable_bonding_members(); return 0; } static int -test_adding_slave_after_bonded_device_started(void) +test_adding_member_after_bonding_device_started(void) { int i; - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ROUND_ROBIN, 0, 4, 0), - "Failed to add slaves to bonded device"); + "Failed to add members to bonding device"); - /* Enabled slave devices */ - for (i = 0; i < test_params->bonded_slave_count + 1; i++) { + /* Enabled member devices */ + for (i = 0; i < test_params->bonding_member_count + 1; i++) { virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[i], 1); + test_params->member_port_ids[i], 1); } - TEST_ASSERT_SUCCESS(rte_eth_bond_slave_add(test_params->bonded_port_id, - test_params->slave_port_ids[test_params->bonded_slave_count]), - "Failed to add slave to bonded port.\n"); + TEST_ASSERT_SUCCESS(rte_eth_bond_member_add(test_params->bonding_port_id, + test_params->member_port_ids[test_params->bonding_member_count]), + "Failed to add member to bonding port.\n"); rte_eth_stats_reset( - test_params->slave_port_ids[test_params->bonded_slave_count]); + test_params->member_port_ids[test_params->bonding_member_count]); - test_params->bonded_slave_count++; + test_params->bonding_member_count++; - return remove_slaves_and_stop_bonded_device(); + return remove_members_and_stop_bonding_device(); } -#define TEST_STATUS_INTERRUPT_SLAVE_COUNT 4 +#define TEST_STATUS_INTERRUPT_MEMBER_COUNT 4 #define TEST_LSC_WAIT_TIMEOUT_US 500000 int test_lsc_interrupt_count; @@ -1237,43 +1245,43 @@ lsc_timeout(int wait_us) static int test_status_interrupt(void) { - int slave_count; - uint16_t slaves[RTE_MAX_ETHPORTS]; + int member_count; + uint16_t members[RTE_MAX_ETHPORTS]; - /* initialized bonding device with T slaves */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* initialized bonding device with T members */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ROUND_ROBIN, 1, - 
TEST_STATUS_INTERRUPT_SLAVE_COUNT, 1), - "Failed to initialise bonded device"); + TEST_STATUS_INTERRUPT_MEMBER_COUNT, 1), + "Failed to initialise bonding device"); test_lsc_interrupt_count = 0; /* register link status change interrupt callback */ - rte_eth_dev_callback_register(test_params->bonded_port_id, + rte_eth_dev_callback_register(test_params->bonding_port_id, RTE_ETH_EVENT_INTR_LSC, test_bonding_lsc_event_callback, - &test_params->bonded_port_id); + &test_params->bonding_port_id); - slave_count = rte_eth_bond_active_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); + member_count = rte_eth_bond_active_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, TEST_STATUS_INTERRUPT_SLAVE_COUNT, - "Number of active slaves (%d) is not as expected (%d)", - slave_count, TEST_STATUS_INTERRUPT_SLAVE_COUNT); + TEST_ASSERT_EQUAL(member_count, TEST_STATUS_INTERRUPT_MEMBER_COUNT, + "Number of active members (%d) is not as expected (%d)", + member_count, TEST_STATUS_INTERRUPT_MEMBER_COUNT); - /* Bring all 4 slaves link status to down and test that we have received a + /* Bring all 4 members link status to down and test that we have received a * lsc interrupts */ virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[0], 0); + test_params->member_port_ids[0], 0); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[1], 0); + test_params->member_port_ids[1], 0); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[2], 0); + test_params->member_port_ids[2], 0); TEST_ASSERT_EQUAL(test_lsc_interrupt_count, 0, "Received a link status change interrupt unexpectedly"); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[3], 0); + test_params->member_port_ids[3], 0); TEST_ASSERT(lsc_timeout(TEST_LSC_WAIT_TIMEOUT_US) == 0, "timed out waiting for interrupt"); @@ -1281,18 +1289,18 @@ test_status_interrupt(void) TEST_ASSERT(test_lsc_interrupt_count > 0, "Did not receive link status change interrupt"); - slave_count = rte_eth_bond_active_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); + member_count = rte_eth_bond_active_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, 0, - "Number of active slaves (%d) is not as expected (%d)", - slave_count, 0); + TEST_ASSERT_EQUAL(member_count, 0, + "Number of active members (%d) is not as expected (%d)", + member_count, 0); - /* bring one slave port up so link status will change */ + /* bring one member port up so link status will change */ test_lsc_interrupt_count = 0; virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[0], 1); + test_params->member_port_ids[0], 1); TEST_ASSERT(lsc_timeout(TEST_LSC_WAIT_TIMEOUT_US) == 0, "timed out waiting for interrupt"); @@ -1301,12 +1309,14 @@ test_status_interrupt(void) TEST_ASSERT(test_lsc_interrupt_count > 0, "Did not receive link status change interrupt"); - /* Verify that calling the same slave lsc interrupt doesn't cause another - * lsc interrupt from bonded device */ + /* + * Verify that calling the same member lsc interrupt doesn't cause another + * lsc interrupt from bonding device. 
+ */ test_lsc_interrupt_count = 0; virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[0], 1); + test_params->member_port_ids[0], 1); TEST_ASSERT(lsc_timeout(TEST_LSC_WAIT_TIMEOUT_US) != 0, "received unexpected interrupt"); @@ -1316,12 +1326,12 @@ test_status_interrupt(void) /* unregister lsc callback before exiting */ - rte_eth_dev_callback_unregister(test_params->bonded_port_id, + rte_eth_dev_callback_unregister(test_params->bonding_port_id, RTE_ETH_EVENT_INTR_LSC, test_bonding_lsc_event_callback, - &test_params->bonded_port_id); + &test_params->bonding_port_id); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -1398,11 +1408,11 @@ test_roundrobin_tx_burst(void) struct rte_mbuf *pkt_burst[MAX_PKT_BURST]; struct rte_eth_stats port_stats; - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ROUND_ROBIN, 0, 2, 1), - "Failed to initialise bonded device"); + "Failed to initialise bonding device"); - burst_size = 20 * test_params->bonded_slave_count; + burst_size = 20 * test_params->bonding_member_count; TEST_ASSERT(burst_size <= MAX_PKT_BURST, "Burst size specified is greater than supported."); @@ -1411,41 +1421,41 @@ test_roundrobin_tx_burst(void) TEST_ASSERT_EQUAL(generate_test_burst(pkt_burst, burst_size, 0, 1, 0, 0, 0), burst_size, "failed to generate test burst"); - /* Send burst on bonded port */ + /* Send burst on bonding port */ TEST_ASSERT_EQUAL(rte_eth_tx_burst( - test_params->bonded_port_id, 0, pkt_burst, burst_size), burst_size, + test_params->bonding_port_id, 0, pkt_burst, burst_size), burst_size, "tx burst failed"); - /* Verify bonded port tx stats */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding port tx stats */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)burst_size, - "Bonded Port (%d) opackets value (%u) not as expected (%d)\n", - test_params->bonded_port_id, (unsigned int)port_stats.opackets, + "Bonding Port (%d) opackets value (%u) not as expected (%d)\n", + test_params->bonding_port_id, (unsigned int)port_stats.opackets, burst_size); - /* Verify slave ports tx stats */ - for (i = 0; i < test_params->bonded_slave_count; i++) { - rte_eth_stats_get(test_params->slave_port_ids[i], &port_stats); + /* Verify member ports tx stats */ + for (i = 0; i < test_params->bonding_member_count; i++) { + rte_eth_stats_get(test_params->member_port_ids[i], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, - (uint64_t)burst_size / test_params->bonded_slave_count, - "Slave Port (%d) opackets value (%u) not as expected (%d)\n", - test_params->bonded_port_id, (unsigned int)port_stats.opackets, - burst_size / test_params->bonded_slave_count); + (uint64_t)burst_size / test_params->bonding_member_count, + "Member Port (%d) opackets value (%u) not as expected (%d)\n", + test_params->bonding_port_id, (unsigned int)port_stats.opackets, + burst_size / test_params->bonding_member_count); } - /* Put all slaves down and try and transmit */ - for (i = 0; i < test_params->bonded_slave_count; i++) { + /* Put all members down and try and transmit */ + for (i = 0; i < test_params->bonding_member_count; i++) { virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[i], 0); + 
test_params->member_port_ids[i], 0); } - /* Send burst on bonded port */ - TEST_ASSERT_EQUAL(rte_eth_tx_burst(test_params->bonded_port_id, 0, + /* Send burst on bonding port */ + TEST_ASSERT_EQUAL(rte_eth_tx_burst(test_params->bonding_port_id, 0, pkt_burst, burst_size), 0, "tx burst return unexpected value"); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -1471,13 +1481,13 @@ free_mbufs(struct rte_mbuf **mbufs, int nb_mbufs) rte_pktmbuf_free(mbufs[i]); } -#define TEST_RR_SLAVE_TX_FAIL_SLAVE_COUNT (2) -#define TEST_RR_SLAVE_TX_FAIL_BURST_SIZE (64) -#define TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT (22) -#define TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX (1) +#define TEST_RR_MEMBER_TX_FAIL_MEMBER_COUNT (2) +#define TEST_RR_MEMBER_TX_FAIL_BURST_SIZE (64) +#define TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT (22) +#define TEST_RR_MEMBER_TX_FAIL_FAILING_MEMBER_IDX (1) static int -test_roundrobin_tx_burst_slave_tx_fail(void) +test_roundrobin_tx_burst_member_tx_fail(void) { struct rte_mbuf *pkt_burst[MAX_PKT_BURST]; struct rte_mbuf *expected_tx_fail_pkts[MAX_PKT_BURST]; @@ -1486,97 +1496,99 @@ test_roundrobin_tx_burst_slave_tx_fail(void) int i, first_fail_idx, tx_count; - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ROUND_ROBIN, 0, - TEST_RR_SLAVE_TX_FAIL_SLAVE_COUNT, 1), - "Failed to initialise bonded device"); + TEST_RR_MEMBER_TX_FAIL_MEMBER_COUNT, 1), + "Failed to initialise bonding device"); /* Generate test bursts of packets to transmit */ TEST_ASSERT_EQUAL(generate_test_burst(pkt_burst, - TEST_RR_SLAVE_TX_FAIL_BURST_SIZE, 0, 1, 0, 0, 0), - TEST_RR_SLAVE_TX_FAIL_BURST_SIZE, + TEST_RR_MEMBER_TX_FAIL_BURST_SIZE, 0, 1, 0, 0, 0), + TEST_RR_MEMBER_TX_FAIL_BURST_SIZE, "Failed to generate test packet burst"); /* Copy references to packets which we expect not to be transmitted */ - first_fail_idx = (TEST_RR_SLAVE_TX_FAIL_BURST_SIZE - - (TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT * - TEST_RR_SLAVE_TX_FAIL_SLAVE_COUNT)) + - TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX; + first_fail_idx = (TEST_RR_MEMBER_TX_FAIL_BURST_SIZE - + (TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT * + TEST_RR_MEMBER_TX_FAIL_MEMBER_COUNT)) + + TEST_RR_MEMBER_TX_FAIL_FAILING_MEMBER_IDX; - for (i = 0; i < TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT; i++) { + for (i = 0; i < TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT; i++) { expected_tx_fail_pkts[i] = pkt_burst[first_fail_idx + - (i * TEST_RR_SLAVE_TX_FAIL_SLAVE_COUNT)]; + (i * TEST_RR_MEMBER_TX_FAIL_MEMBER_COUNT)]; } - /* Set virtual slave to only fail transmission of - * TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT packets in burst */ + /* + * Set virtual member to only fail transmission of + * TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT packets in burst. 
+ */ virtual_ethdev_tx_burst_fn_set_success( - test_params->slave_port_ids[TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX], + test_params->member_port_ids[TEST_RR_MEMBER_TX_FAIL_FAILING_MEMBER_IDX], 0); virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count( - test_params->slave_port_ids[TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX], - TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT); + test_params->member_port_ids[TEST_RR_MEMBER_TX_FAIL_FAILING_MEMBER_IDX], + TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT); - tx_count = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkt_burst, - TEST_RR_SLAVE_TX_FAIL_BURST_SIZE); + tx_count = rte_eth_tx_burst(test_params->bonding_port_id, 0, pkt_burst, + TEST_RR_MEMBER_TX_FAIL_BURST_SIZE); - TEST_ASSERT_EQUAL(tx_count, TEST_RR_SLAVE_TX_FAIL_BURST_SIZE - - TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT, + TEST_ASSERT_EQUAL(tx_count, TEST_RR_MEMBER_TX_FAIL_BURST_SIZE - + TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT, "Transmitted (%d) an unexpected (%d) number of packets", tx_count, - TEST_RR_SLAVE_TX_FAIL_BURST_SIZE - - TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT); + TEST_RR_MEMBER_TX_FAIL_BURST_SIZE - + TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT); /* Verify that failed packet are expected failed packets */ - for (i = 0; i < TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT; i++) { + for (i = 0; i < TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT; i++) { TEST_ASSERT_EQUAL(expected_tx_fail_pkts[i], pkt_burst[i + tx_count], "expected mbuf (%d) pointer %p not expected pointer %p", i, expected_tx_fail_pkts[i], pkt_burst[i + tx_count]); } - /* Verify bonded port tx stats */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding port tx stats */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, - (uint64_t)TEST_RR_SLAVE_TX_FAIL_BURST_SIZE - - TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT, - "Bonded Port (%d) opackets value (%u) not as expected (%d)", - test_params->bonded_port_id, (unsigned int)port_stats.opackets, - TEST_RR_SLAVE_TX_FAIL_BURST_SIZE - - TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT); + (uint64_t)TEST_RR_MEMBER_TX_FAIL_BURST_SIZE - + TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT, + "Bonding Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonding_port_id, (unsigned int)port_stats.opackets, + TEST_RR_MEMBER_TX_FAIL_BURST_SIZE - + TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT); - /* Verify slave ports tx stats */ - for (i = 0; i < test_params->bonded_slave_count; i++) { - int slave_expected_tx_count; + /* Verify member ports tx stats */ + for (i = 0; i < test_params->bonding_member_count; i++) { + int member_expected_tx_count; - rte_eth_stats_get(test_params->slave_port_ids[i], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[i], &port_stats); - slave_expected_tx_count = TEST_RR_SLAVE_TX_FAIL_BURST_SIZE / - test_params->bonded_slave_count; + member_expected_tx_count = TEST_RR_MEMBER_TX_FAIL_BURST_SIZE / + test_params->bonding_member_count; - if (i == TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX) - slave_expected_tx_count = slave_expected_tx_count - - TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT; + if (i == TEST_RR_MEMBER_TX_FAIL_FAILING_MEMBER_IDX) + member_expected_tx_count = member_expected_tx_count - + TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT; TEST_ASSERT_EQUAL(port_stats.opackets, - (uint64_t)slave_expected_tx_count, - "Slave Port (%d) opackets value (%u) not as expected (%d)", - test_params->slave_port_ids[i], - (unsigned int)port_stats.opackets, slave_expected_tx_count); + (uint64_t)member_expected_tx_count, + "Member Port (%d) opackets value (%u) not as expected (%d)", + 
test_params->member_port_ids[i], + (unsigned int)port_stats.opackets, member_expected_tx_count); } /* Verify that all mbufs have a ref value of zero */ TEST_ASSERT_SUCCESS(verify_mbufs_ref_count(&pkt_burst[tx_count], - TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT, 1), + TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT, 1), "mbufs refcnts not as expected"); - free_mbufs(&pkt_burst[tx_count], TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT); + free_mbufs(&pkt_burst[tx_count], TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int -test_roundrobin_rx_burst_on_single_slave(void) +test_roundrobin_rx_burst_on_single_member(void) { struct rte_mbuf *gen_pkt_burst[MAX_PKT_BURST] = { NULL }; struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL }; @@ -1585,59 +1597,59 @@ test_roundrobin_rx_burst_on_single_slave(void) int i, j, burst_size = 25; - /* Initialize bonded device with 4 slaves in round robin mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 4 members in round robin mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ROUND_ROBIN, 0, 4, 1), - "Failed to initialize bonded device with slaves"); + "Failed to initialize bonding device with members"); /* Generate test bursts of packets to transmit */ TEST_ASSERT_EQUAL(generate_test_burst( gen_pkt_burst, burst_size, 0, 1, 0, 0, 0), burst_size, "burst generation failed"); - for (i = 0; i < test_params->bonded_slave_count; i++) { - /* Add rx data to slave */ - virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[i], + for (i = 0; i < test_params->bonding_member_count; i++) { + /* Add rx data to member */ + virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[i], &gen_pkt_burst[0], burst_size); - /* Call rx burst on bonded device */ - /* Send burst on bonded port */ + /* Call rx burst on bonding device */ + /* Send burst on bonding port */ TEST_ASSERT_EQUAL(rte_eth_rx_burst( - test_params->bonded_port_id, 0, rx_pkt_burst, + test_params->bonding_port_id, 0, rx_pkt_burst, MAX_PKT_BURST), burst_size, "round-robin rx burst failed"); - /* Verify bonded device rx count */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding device rx count */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size, - "Bonded Port (%d) ipackets value (%u) not as expected (%d)", - test_params->bonded_port_id, + "Bonding Port (%d) ipackets value (%u) not as expected (%d)", + test_params->bonding_port_id, (unsigned int)port_stats.ipackets, burst_size); - /* Verify bonded slave devices rx count */ - /* Verify slave ports tx stats */ - for (j = 0; j < test_params->bonded_slave_count; j++) { - rte_eth_stats_get(test_params->slave_port_ids[j], &port_stats); + /* Verify bonding member devices rx count */ + /* Verify member ports tx stats */ + for (j = 0; j < test_params->bonding_member_count; j++) { + rte_eth_stats_get(test_params->member_port_ids[j], &port_stats); if (i == j) { TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size, - "Slave Port (%d) ipackets value (%u) not as expected" - " (%d)", test_params->slave_port_ids[i], + "Member Port (%d) ipackets value (%u) not as expected" + " (%d)", test_params->member_port_ids[i], (unsigned int)port_stats.ipackets, burst_size); } else { 
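As context for the statistics checks in this and the following hunks, a small sketch (helper name illustrative, not taken from the patch) of the pattern the tests depend on: rte_eth_stats_get() is read on both the bonding port and each member port, and the bonding port's ipackets/opackets counters are expected to match the totals seen across its members.

#include <rte_ethdev.h>

/* Return the ipackets counter of a port, or 0 if stats are unavailable. */
static uint64_t
port_ipackets(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return 0;

	return stats.ipackets;
}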
TEST_ASSERT_EQUAL(port_stats.ipackets, 0, - "Slave Port (%d) ipackets value (%u) not as expected" - " (%d)", test_params->slave_port_ids[i], + "Member Port (%d) ipackets value (%u) not as expected" + " (%d)", test_params->member_port_ids[i], (unsigned int)port_stats.ipackets, 0); } - /* Reset bonded slaves stats */ - rte_eth_stats_reset(test_params->slave_port_ids[j]); + /* Reset bonding members stats */ + rte_eth_stats_reset(test_params->member_port_ids[j]); } - /* reset bonded device stats */ - rte_eth_stats_reset(test_params->bonded_port_id); + /* reset bonding device stats */ + rte_eth_stats_reset(test_params->bonding_port_id); } /* free mbufs */ @@ -1646,80 +1658,80 @@ test_roundrobin_rx_burst_on_single_slave(void) } - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } -#define TEST_ROUNDROBIN_TX_BURST_SLAVE_COUNT (3) +#define TEST_ROUNDROBIN_TX_BURST_MEMBER_COUNT (3) static int -test_roundrobin_rx_burst_on_multiple_slaves(void) +test_roundrobin_rx_burst_on_multiple_members(void) { - struct rte_mbuf *gen_pkt_burst[TEST_ROUNDROBIN_TX_BURST_SLAVE_COUNT][MAX_PKT_BURST]; + struct rte_mbuf *gen_pkt_burst[TEST_ROUNDROBIN_TX_BURST_MEMBER_COUNT][MAX_PKT_BURST]; struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL }; struct rte_eth_stats port_stats; - int burst_size[TEST_ROUNDROBIN_TX_BURST_SLAVE_COUNT] = { 15, 13, 36 }; + int burst_size[TEST_ROUNDROBIN_TX_BURST_MEMBER_COUNT] = { 15, 13, 36 }; int i, nb_rx; - /* Initialize bonded device with 4 slaves in round robin mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 4 members in round robin mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ROUND_ROBIN, 0, 4, 1), - "Failed to initialize bonded device with slaves"); + "Failed to initialize bonding device with members"); /* Generate test bursts of packets to transmit */ - for (i = 0; i < TEST_ROUNDROBIN_TX_BURST_SLAVE_COUNT; i++) { + for (i = 0; i < TEST_ROUNDROBIN_TX_BURST_MEMBER_COUNT; i++) { TEST_ASSERT_EQUAL(generate_test_burst( &gen_pkt_burst[i][0], burst_size[i], 0, 1, 0, 0, 0), burst_size[i], "burst generation failed"); } - /* Add rx data to slaves */ - for (i = 0; i < TEST_ROUNDROBIN_TX_BURST_SLAVE_COUNT; i++) { - virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[i], + /* Add rx data to members */ + for (i = 0; i < TEST_ROUNDROBIN_TX_BURST_MEMBER_COUNT; i++) { + virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[i], &gen_pkt_burst[i][0], burst_size[i]); } - /* Call rx burst on bonded device */ - /* Send burst on bonded port */ - nb_rx = rte_eth_rx_burst(test_params->bonded_port_id, 0, rx_pkt_burst, + /* Call rx burst on bonding device */ + /* Send burst on bonding port */ + nb_rx = rte_eth_rx_burst(test_params->bonding_port_id, 0, rx_pkt_burst, MAX_PKT_BURST); TEST_ASSERT_EQUAL(nb_rx , burst_size[0] + burst_size[1] + burst_size[2], "round-robin rx burst failed (%d != %d)\n", nb_rx, burst_size[0] + burst_size[1] + burst_size[2]); - /* Verify bonded device rx count */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding device rx count */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)(burst_size[0] + burst_size[1] + burst_size[2]), - "Bonded Port (%d) ipackets value (%u) not as expected (%d)", - test_params->bonded_port_id, 
(unsigned int)port_stats.ipackets, + "Bonding Port (%d) ipackets value (%u) not as expected (%d)", + test_params->bonding_port_id, (unsigned int)port_stats.ipackets, burst_size[0] + burst_size[1] + burst_size[2]); - /* Verify bonded slave devices rx counts */ - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + /* Verify bonding member devices rx counts */ + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size[0], - "Slave Port (%d) ipackets value (%u) not as expected (%d)", - test_params->slave_port_ids[0], + "Member Port (%d) ipackets value (%u) not as expected (%d)", + test_params->member_port_ids[0], (unsigned int)port_stats.ipackets, burst_size[0]); - rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[1], &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size[1], - "Slave Port (%d) ipackets value (%u) not as expected (%d)", - test_params->slave_port_ids[1], (unsigned int)port_stats.ipackets, + "Member Port (%d) ipackets value (%u) not as expected (%d)", + test_params->member_port_ids[1], (unsigned int)port_stats.ipackets, burst_size[1]); - rte_eth_stats_get(test_params->slave_port_ids[2], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[2], &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size[2], - "Slave Port (%d) ipackets value (%u) not as expected (%d)", - test_params->slave_port_ids[2], + "Member Port (%d) ipackets value (%u) not as expected (%d)", + test_params->member_port_ids[2], (unsigned int)port_stats.ipackets, burst_size[2]); - rte_eth_stats_get(test_params->slave_port_ids[3], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[3], &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, 0, - "Slave Port (%d) ipackets value (%u) not as expected (%d)", - test_params->slave_port_ids[3], + "Member Port (%d) ipackets value (%u) not as expected (%d)", + test_params->member_port_ids[3], (unsigned int)port_stats.ipackets, 0); /* free mbufs */ @@ -1727,8 +1739,8 @@ test_roundrobin_rx_burst_on_multiple_slaves(void) rte_pktmbuf_free(rx_pkt_burst[i]); } - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -1739,98 +1751,106 @@ test_roundrobin_verify_mac_assignment(void) int i; - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &expected_mac_addr_0), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], + &expected_mac_addr_0), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[2], &expected_mac_addr_2), + test_params->member_port_ids[0]); + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[2], + &expected_mac_addr_2), "Failed to get mac address (port %d)", - test_params->slave_port_ids[2]); + test_params->member_port_ids[2]); - /* Initialize bonded device with 4 slaves in round robin mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 4 members in round robin mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ROUND_ROBIN, 0, 4, 1), - "Failed to initialize bonded device with slaves"); + "Failed to initialize bonding device with members"); - /* Verify that all MACs are the 
same as first slave added to bonded dev */ - for (i = 0; i < test_params->bonded_slave_count; i++) { - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[i], &read_mac_addr), + /* Verify that all MACs are the same as first member added to bonding dev */ + for (i = 0; i < test_params->bonding_member_count; i++) { + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[i], + &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[i]); + "member port (%d) mac address not set to that of primary port", + test_params->member_port_ids[i]); } /* change primary and verify that MAC addresses haven't changed */ - TEST_ASSERT_SUCCESS(rte_eth_bond_primary_set(test_params->bonded_port_id, - test_params->slave_port_ids[2]), - "Failed to set bonded port (%d) primary port to (%d)", - test_params->bonded_port_id, test_params->slave_port_ids[i]); - - for (i = 0; i < test_params->bonded_slave_count; i++) { - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[i], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_bond_primary_set(test_params->bonding_port_id, + test_params->member_port_ids[2]), + "Failed to set bonding port (%d) primary port to (%d)", + test_params->bonding_port_id, test_params->member_port_ids[i]); + + for (i = 0; i < test_params->bonding_member_count; i++) { + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[i], + &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address has changed to that of primary" - " port without stop/start toggle of bonded device", - test_params->slave_port_ids[i]); + "member port (%d) mac address has changed to that of primary" + " port without stop/start toggle of bonding device", + test_params->member_port_ids[i]); } - /* stop / start bonded device and verify that primary MAC address is - * propagate to bonded device and slaves */ - TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonded_port_id), - "Failed to stop bonded port %u", - test_params->bonded_port_id); + /* + * stop / start bonding device and verify that primary MAC address is + * propagate to bonding device and members. 
+ */ + TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonding_port_id), + "Failed to stop bonding port %u", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonded_port_id), - "Failed to start bonded device"); + TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonding_port_id), + "Failed to start bonding device"); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); + test_params->bonding_port_id); TEST_ASSERT_SUCCESS( memcmp(&expected_mac_addr_2, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of new primary port", - test_params->slave_port_ids[i]); + "bonding port (%d) mac address not set to that of new primary port", + test_params->member_port_ids[i]); - for (i = 0; i < test_params->bonded_slave_count; i++) { - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[i], &read_mac_addr), + for (i = 0; i < test_params->bonding_member_count; i++) { + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[i], + &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_2, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of new primary" - " port", test_params->slave_port_ids[i]); + "member port (%d) mac address not set to that of new primary" + " port", test_params->member_port_ids[i]); } /* Set explicit MAC address */ TEST_ASSERT_SUCCESS(rte_eth_bond_mac_address_set( - test_params->bonded_port_id, - (struct rte_ether_addr *)bonded_mac), + test_params->bonding_port_id, + (struct rte_ether_addr *)bonding_mac), "Failed to set MAC"); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); - TEST_ASSERT_SUCCESS(memcmp(bonded_mac, &read_mac_addr, + test_params->bonding_port_id); + TEST_ASSERT_SUCCESS(memcmp(bonding_mac, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of new primary port", - test_params->slave_port_ids[i]); + "bonding port (%d) mac address not set to that of new primary port", + test_params->member_port_ids[i]); - for (i = 0; i < test_params->bonded_slave_count; i++) { - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[i], &read_mac_addr), + for (i = 0; i < test_params->bonding_member_count; i++) { + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[i], + &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[i]); - TEST_ASSERT_SUCCESS(memcmp(bonded_mac, &read_mac_addr, - sizeof(read_mac_addr)), "slave port (%d) mac address not set to" - " that of new primary port\n", test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); + TEST_ASSERT_SUCCESS(memcmp(bonding_mac, &read_mac_addr, + sizeof(read_mac_addr)), "member port (%d) mac address not set to" + " that of new primary port\n", test_params->member_port_ids[i]); } - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } 
static int @@ -1839,278 +1859,278 @@ test_roundrobin_verify_promiscuous_enable_disable(void) int i, promiscuous_en; int ret; - /* Initialize bonded device with 4 slaves in round robin mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 4 members in round robin mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ROUND_ROBIN, 0, 4, 1), - "Failed to initialize bonded device with slaves"); + "Failed to initialize bonding device with members"); - ret = rte_eth_promiscuous_enable(test_params->bonded_port_id); + ret = rte_eth_promiscuous_enable(test_params->bonding_port_id); TEST_ASSERT_SUCCESS(ret, "Failed to enable promiscuous mode for port %d: %s", - test_params->bonded_port_id, rte_strerror(-ret)); + test_params->bonding_port_id, rte_strerror(-ret)); - promiscuous_en = rte_eth_promiscuous_get(test_params->bonded_port_id); + promiscuous_en = rte_eth_promiscuous_get(test_params->bonding_port_id); TEST_ASSERT_EQUAL(promiscuous_en, 1, "Port (%d) promiscuous mode not enabled", - test_params->bonded_port_id); + test_params->bonding_port_id); - for (i = 0; i < test_params->bonded_slave_count; i++) { + for (i = 0; i < test_params->bonding_member_count; i++) { promiscuous_en = rte_eth_promiscuous_get( - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); TEST_ASSERT_EQUAL(promiscuous_en, 1, - "slave port (%d) promiscuous mode not enabled", - test_params->slave_port_ids[i]); + "member port (%d) promiscuous mode not enabled", + test_params->member_port_ids[i]); } - ret = rte_eth_promiscuous_disable(test_params->bonded_port_id); + ret = rte_eth_promiscuous_disable(test_params->bonding_port_id); TEST_ASSERT_SUCCESS(ret, "Failed to disable promiscuous mode for port %d: %s", - test_params->bonded_port_id, rte_strerror(-ret)); + test_params->bonding_port_id, rte_strerror(-ret)); - promiscuous_en = rte_eth_promiscuous_get(test_params->bonded_port_id); + promiscuous_en = rte_eth_promiscuous_get(test_params->bonding_port_id); TEST_ASSERT_EQUAL(promiscuous_en, 0, "Port (%d) promiscuous mode not disabled\n", - test_params->bonded_port_id); + test_params->bonding_port_id); - for (i = 0; i < test_params->bonded_slave_count; i++) { + for (i = 0; i < test_params->bonding_member_count; i++) { promiscuous_en = rte_eth_promiscuous_get( - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); TEST_ASSERT_EQUAL(promiscuous_en, 0, "Port (%d) promiscuous mode not disabled\n", - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); } - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } -#define TEST_RR_LINK_STATUS_SLAVE_COUNT (4) -#define TEST_RR_LINK_STATUS_EXPECTED_ACTIVE_SLAVE_COUNT (2) +#define TEST_RR_LINK_STATUS_MEMBER_COUNT (4) +#define TEST_RR_LINK_STATUS_EXPECTED_ACTIVE_MEMBER_COUNT (2) static int -test_roundrobin_verify_slave_link_status_change_behaviour(void) +test_roundrobin_verify_member_link_status_change_behaviour(void) { struct rte_mbuf *tx_pkt_burst[MAX_PKT_BURST] = { NULL }; - struct rte_mbuf *gen_pkt_burst[TEST_RR_LINK_STATUS_SLAVE_COUNT][MAX_PKT_BURST]; + struct rte_mbuf *gen_pkt_burst[TEST_RR_LINK_STATUS_MEMBER_COUNT][MAX_PKT_BURST]; struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL }; struct rte_eth_stats port_stats; - uint16_t slaves[RTE_MAX_ETHPORTS]; + uint16_t members[RTE_MAX_ETHPORTS]; - int i, burst_size, 
slave_count; + int i, burst_size, member_count; /* NULL all pointers in array to simplify cleanup */ memset(gen_pkt_burst, 0, sizeof(gen_pkt_burst)); - /* Initialize bonded device with TEST_RR_LINK_STATUS_SLAVE_COUNT slaves + /* Initialize bonding device with TEST_RR_LINK_STATUS_MEMBER_COUNT members * in round robin mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( - BONDING_MODE_ROUND_ROBIN, 0, TEST_RR_LINK_STATUS_SLAVE_COUNT, 1), - "Failed to initialize bonded device with slaves"); + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( + BONDING_MODE_ROUND_ROBIN, 0, TEST_RR_LINK_STATUS_MEMBER_COUNT, 1), + "Failed to initialize bonding device with members"); - /* Verify Current Slaves Count /Active Slave Count is */ - slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, slaves, + /* Verify Current Members Count /Active Member Count is */ + member_count = rte_eth_bond_members_get(test_params->bonding_port_id, members, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, TEST_RR_LINK_STATUS_SLAVE_COUNT, - "Number of slaves (%d) is not as expected (%d).", - slave_count, TEST_RR_LINK_STATUS_SLAVE_COUNT); + TEST_ASSERT_EQUAL(member_count, TEST_RR_LINK_STATUS_MEMBER_COUNT, + "Number of members (%d) is not as expected (%d).", + member_count, TEST_RR_LINK_STATUS_MEMBER_COUNT); - slave_count = rte_eth_bond_active_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, TEST_RR_LINK_STATUS_SLAVE_COUNT, - "Number of active slaves (%d) is not as expected (%d).", - slave_count, TEST_RR_LINK_STATUS_SLAVE_COUNT); + member_count = rte_eth_bond_active_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(member_count, TEST_RR_LINK_STATUS_MEMBER_COUNT, + "Number of active members (%d) is not as expected (%d).", + member_count, TEST_RR_LINK_STATUS_MEMBER_COUNT); - /* Set 2 slaves eth_devs link status to down */ + /* Set 2 members eth_devs link status to down */ virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[1], 0); + test_params->member_port_ids[1], 0); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[3], 0); + test_params->member_port_ids[3], 0); - slave_count = rte_eth_bond_active_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, - TEST_RR_LINK_STATUS_EXPECTED_ACTIVE_SLAVE_COUNT, - "Number of active slaves (%d) is not as expected (%d).\n", - slave_count, TEST_RR_LINK_STATUS_EXPECTED_ACTIVE_SLAVE_COUNT); + member_count = rte_eth_bond_active_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(member_count, + TEST_RR_LINK_STATUS_EXPECTED_ACTIVE_MEMBER_COUNT, + "Number of active members (%d) is not as expected (%d).\n", + member_count, TEST_RR_LINK_STATUS_EXPECTED_ACTIVE_MEMBER_COUNT); burst_size = 20; - /* Verify that pkts are not sent on slaves with link status down: + /* Verify that pkts are not sent on members with link status down: * * 1. Generate test burst of traffic - * 2. Transmit burst on bonded eth_dev - * 3. Verify stats for bonded eth_dev (opackets = burst_size) - * 4. Verify stats for slave eth_devs (s0 = 10, s1 = 0, s2 = 10, s3 = 0) + * 2. Transmit burst on bonding eth_dev + * 3. Verify stats for bonding eth_dev (opackets = burst_size) + * 4. 
Verify stats for member eth_devs (s0 = 10, s1 = 0, s2 = 10, s3 = 0) */ TEST_ASSERT_EQUAL( generate_test_burst(tx_pkt_burst, burst_size, 0, 1, 0, 0, 0), burst_size, "generate_test_burst failed"); - rte_eth_stats_reset(test_params->bonded_port_id); + rte_eth_stats_reset(test_params->bonding_port_id); TEST_ASSERT_EQUAL( - rte_eth_tx_burst(test_params->bonded_port_id, 0, tx_pkt_burst, + rte_eth_tx_burst(test_params->bonding_port_id, 0, tx_pkt_burst, burst_size), burst_size, "rte_eth_tx_burst failed"); - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)burst_size, "Port (%d) opackets stats (%d) not expected (%d) value", - test_params->bonded_port_id, (int)port_stats.opackets, + test_params->bonding_port_id, (int)port_stats.opackets, burst_size); - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)10, "Port (%d) opackets stats (%d) not expected (%d) value", - test_params->slave_port_ids[0], (int)port_stats.opackets, 10); + test_params->member_port_ids[0], (int)port_stats.opackets, 10); - rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[1], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)0, "Port (%d) opackets stats (%d) not expected (%d) value", - test_params->slave_port_ids[1], (int)port_stats.opackets, 0); + test_params->member_port_ids[1], (int)port_stats.opackets, 0); - rte_eth_stats_get(test_params->slave_port_ids[2], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[2], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)10, "Port (%d) opackets stats (%d) not expected (%d) value", - test_params->slave_port_ids[2], (int)port_stats.opackets, 10); + test_params->member_port_ids[2], (int)port_stats.opackets, 10); - rte_eth_stats_get(test_params->slave_port_ids[3], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[3], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)0, "Port (%d) opackets stats (%d) not expected (%d) value", - test_params->slave_port_ids[3], (int)port_stats.opackets, 0); + test_params->member_port_ids[3], (int)port_stats.opackets, 0); - /* Verify that pkts are not sent on slaves with link status down: + /* Verify that pkts are not sent on members with link status down: * * 1. Generate test bursts of traffic * 2. Add bursts on to virtual eth_devs - * 3. Rx burst on bonded eth_dev, expected (burst_ size * - * TEST_RR_LINK_STATUS_EXPECTED_ACTIVE_SLAVE_COUNT) received - * 4. Verify stats for bonded eth_dev - * 6. Verify stats for slave eth_devs (s0 = 10, s1 = 0, s2 = 10, s3 = 0) + * 3. Rx burst on bonding eth_dev, expected (burst_ size * + * TEST_RR_LINK_STATUS_EXPECTED_ACTIVE_MEMBER_COUNT) received + * 4. Verify stats for bonding eth_dev + * 6. 
Verify stats for member eth_devs (s0 = 10, s1 = 0, s2 = 10, s3 = 0) */ - for (i = 0; i < TEST_RR_LINK_STATUS_SLAVE_COUNT; i++) { + for (i = 0; i < TEST_RR_LINK_STATUS_MEMBER_COUNT; i++) { TEST_ASSERT_EQUAL(generate_test_burst( &gen_pkt_burst[i][0], burst_size, 0, 1, 0, 0, 0), burst_size, "failed to generate packet burst"); - virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[i], + virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[i], &gen_pkt_burst[i][0], burst_size); } TEST_ASSERT_EQUAL(rte_eth_rx_burst( - test_params->bonded_port_id, 0, rx_pkt_burst, MAX_PKT_BURST), + test_params->bonding_port_id, 0, rx_pkt_burst, MAX_PKT_BURST), burst_size + burst_size, "rte_eth_rx_burst failed"); - /* Verify bonded device rx count */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding device rx count */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets , (uint64_t)(burst_size + burst_size), "(%d) port_stats.ipackets not as expected\n", - test_params->bonded_port_id); + test_params->bonding_port_id); /* free mbufs */ for (i = 0; i < MAX_PKT_BURST; i++) { rte_pktmbuf_free(rx_pkt_burst[i]); } - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } -#define TEST_RR_POLLING_LINK_STATUS_SLAVE_COUNT (2) +#define TEST_RR_POLLING_LINK_STATUS_MEMBER_COUNT (2) -uint8_t polling_slave_mac[] = {0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x00 }; +uint8_t polling_member_mac[] = {0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x00 }; -int polling_test_slaves[TEST_RR_POLLING_LINK_STATUS_SLAVE_COUNT] = { -1, -1 }; +int polling_test_members[TEST_RR_POLLING_LINK_STATUS_MEMBER_COUNT] = { -1, -1 }; static int -test_roundrobin_verfiy_polling_slave_link_status_change(void) +test_roundrobin_verify_polling_member_link_status_change(void) { struct rte_ether_addr *mac_addr = - (struct rte_ether_addr *)polling_slave_mac; - char slave_name[RTE_ETH_NAME_MAX_LEN]; + (struct rte_ether_addr *)polling_member_mac; + char member_name[RTE_ETH_NAME_MAX_LEN]; int i; - for (i = 0; i < TEST_RR_POLLING_LINK_STATUS_SLAVE_COUNT; i++) { - /* Generate slave name / MAC address */ - snprintf(slave_name, RTE_ETH_NAME_MAX_LEN, "eth_virt_poll_%d", i); + for (i = 0; i < TEST_RR_POLLING_LINK_STATUS_MEMBER_COUNT; i++) { + /* Generate member name / MAC address */ + snprintf(member_name, RTE_ETH_NAME_MAX_LEN, "eth_virt_poll_%d", i); mac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] = i; - /* Create slave devices with no ISR Support */ - if (polling_test_slaves[i] == -1) { - polling_test_slaves[i] = virtual_ethdev_create(slave_name, mac_addr, + /* Create member devices with no ISR Support */ + if (polling_test_members[i] == -1) { + polling_test_members[i] = virtual_ethdev_create(member_name, mac_addr, rte_socket_id(), 0); - TEST_ASSERT(polling_test_slaves[i] >= 0, - "Failed to create virtual virtual ethdev %s\n", slave_name); + TEST_ASSERT(polling_test_members[i] >= 0, + "Failed to create virtual ethdev %s\n", member_name); - /* Configure slave */ - TEST_ASSERT_SUCCESS(configure_ethdev(polling_test_slaves[i], 0, 0), - "Failed to configure virtual ethdev %s(%d)", slave_name, - polling_test_slaves[i]); + /* Configure member */ + TEST_ASSERT_SUCCESS(configure_ethdev(polling_test_members[i], 0, 0), + "Failed to configure virtual ethdev %s(%d)", member_name, + polling_test_members[i]); } - /* Add slave to bonded device */ - 
TEST_ASSERT_SUCCESS(rte_eth_bond_slave_add(test_params->bonded_port_id, - polling_test_slaves[i]), - "Failed to add slave %s(%d) to bonded device %d", - slave_name, polling_test_slaves[i], - test_params->bonded_port_id); + /* Add member to bonding device */ + TEST_ASSERT_SUCCESS(rte_eth_bond_member_add(test_params->bonding_port_id, + polling_test_members[i]), + "Failed to add member %s(%d) to bonding device %d", + member_name, polling_test_members[i], + test_params->bonding_port_id); } - /* Initialize bonded device */ - TEST_ASSERT_SUCCESS(configure_ethdev(test_params->bonded_port_id, 1, 1), - "Failed to configure bonded device %d", - test_params->bonded_port_id); + /* Initialize bonding device */ + TEST_ASSERT_SUCCESS(configure_ethdev(test_params->bonding_port_id, 1, 1), + "Failed to configure bonding device %d", + test_params->bonding_port_id); /* Register link status change interrupt callback */ - rte_eth_dev_callback_register(test_params->bonded_port_id, + rte_eth_dev_callback_register(test_params->bonding_port_id, RTE_ETH_EVENT_INTR_LSC, test_bonding_lsc_event_callback, - &test_params->bonded_port_id); + &test_params->bonding_port_id); - /* link status change callback for first slave link up */ + /* link status change callback for first member link up */ test_lsc_interrupt_count = 0; - virtual_ethdev_set_link_status(polling_test_slaves[0], 1); + virtual_ethdev_set_link_status(polling_test_members[0], 1); TEST_ASSERT_SUCCESS(lsc_timeout(15000), "timed out waiting for interrupt"); - /* no link status change callback for second slave link up */ + /* no link status change callback for second member link up */ test_lsc_interrupt_count = 0; - virtual_ethdev_set_link_status(polling_test_slaves[1], 1); + virtual_ethdev_set_link_status(polling_test_members[1], 1); TEST_ASSERT_FAIL(lsc_timeout(15000), "unexpectedly succeeded"); - /* link status change callback for both slave links down */ + /* link status change callback for both member links down */ test_lsc_interrupt_count = 0; - virtual_ethdev_set_link_status(polling_test_slaves[0], 0); - virtual_ethdev_set_link_status(polling_test_slaves[1], 0); + virtual_ethdev_set_link_status(polling_test_members[0], 0); + virtual_ethdev_set_link_status(polling_test_members[1], 0); TEST_ASSERT_SUCCESS(lsc_timeout(20000), "timed out waiting for interrupt"); /* Un-Register link status change interrupt callback */ - rte_eth_dev_callback_unregister(test_params->bonded_port_id, + rte_eth_dev_callback_unregister(test_params->bonding_port_id, RTE_ETH_EVENT_INTR_LSC, test_bonding_lsc_event_callback, - &test_params->bonded_port_id); + &test_params->bonding_port_id); - /* Clean up and remove slaves from bonded device */ - for (i = 0; i < TEST_RR_POLLING_LINK_STATUS_SLAVE_COUNT; i++) { + /* Clean up and remove members from bonding device */ + for (i = 0; i < TEST_RR_POLLING_LINK_STATUS_MEMBER_COUNT; i++) { TEST_ASSERT_SUCCESS( - rte_eth_bond_slave_remove(test_params->bonded_port_id, - polling_test_slaves[i]), - "Failed to remove slave %d from bonded port (%d)", - polling_test_slaves[i], test_params->bonded_port_id); + rte_eth_bond_member_remove(test_params->bonding_port_id, + polling_test_members[i]), + "Failed to remove member %d from bonding port (%d)", + polling_test_members[i], test_params->bonding_port_id); } - return remove_slaves_and_stop_bonded_device(); + return remove_members_and_stop_bonding_device(); } @@ -2123,9 +2143,9 @@ test_activebackup_tx_burst(void) struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; struct rte_eth_stats port_stats; - 
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ACTIVE_BACKUP, 0, 1, 1), - "Failed to initialize bonded device with slaves"); + "Failed to initialize bonding device with members"); initialize_eth_header(test_params->pkt_eth_hdr, (struct rte_ether_addr *)src_mac, @@ -2136,7 +2156,7 @@ test_activebackup_tx_burst(void) pktlen = initialize_ipv4_header(test_params->pkt_ipv4_hdr, src_addr, dst_addr_0, pktlen); - burst_size = 20 * test_params->bonded_slave_count; + burst_size = 20 * test_params->bonding_member_count; TEST_ASSERT(burst_size < MAX_PKT_BURST, "Burst size specified is greater than supported."); @@ -2147,51 +2167,51 @@ test_activebackup_tx_burst(void) test_params->pkt_udp_hdr, burst_size, PACKET_BURST_GEN_PKT_LEN, 1), burst_size, "failed to generate burst correctly"); - /* Send burst on bonded port */ - TEST_ASSERT_EQUAL(rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst, + /* Send burst on bonding port */ + TEST_ASSERT_EQUAL(rte_eth_tx_burst(test_params->bonding_port_id, 0, pkts_burst, burst_size), burst_size, "tx burst failed"); - /* Verify bonded port tx stats */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding port tx stats */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)burst_size, - "Bonded Port (%d) opackets value (%u) not as expected (%d)", - test_params->bonded_port_id, (unsigned int)port_stats.opackets, + "Bonding Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonding_port_id, (unsigned int)port_stats.opackets, burst_size); - primary_port = rte_eth_bond_primary_get(test_params->bonded_port_id); + primary_port = rte_eth_bond_primary_get(test_params->bonding_port_id); - /* Verify slave ports tx stats */ - for (i = 0; i < test_params->bonded_slave_count; i++) { - rte_eth_stats_get(test_params->slave_port_ids[i], &port_stats); - if (test_params->slave_port_ids[i] == primary_port) { + /* Verify member ports tx stats */ + for (i = 0; i < test_params->bonding_member_count; i++) { + rte_eth_stats_get(test_params->member_port_ids[i], &port_stats); + if (test_params->member_port_ids[i] == primary_port) { TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)burst_size, - "Slave Port (%d) opackets value (%u) not as expected (%d)", - test_params->bonded_port_id, + "Member Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonding_port_id, (unsigned int)port_stats.opackets, - burst_size / test_params->bonded_slave_count); + burst_size / test_params->bonding_member_count); } else { TEST_ASSERT_EQUAL(port_stats.opackets, 0, - "Slave Port (%d) opackets value (%u) not as expected (%d)", - test_params->bonded_port_id, + "Member Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonding_port_id, (unsigned int)port_stats.opackets, 0); } } - /* Put all slaves down and try and transmit */ - for (i = 0; i < test_params->bonded_slave_count; i++) { + /* Put all members down and try and transmit */ + for (i = 0; i < test_params->bonding_member_count; i++) { virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[i], 0); + test_params->member_port_ids[i], 0); } - /* Send burst on bonded port */ - TEST_ASSERT_EQUAL(rte_eth_tx_burst(test_params->bonded_port_id, 0, + /* Send burst on bonding port */ + TEST_ASSERT_EQUAL(rte_eth_tx_burst(test_params->bonding_port_id, 0, pkts_burst, burst_size), 0, "Sending empty burst failed"); - /* Clean up 
and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } -#define TEST_ACTIVE_BACKUP_RX_BURST_SLAVE_COUNT (4) +#define TEST_ACTIVE_BACKUP_RX_BURST_MEMBER_COUNT (4) static int test_activebackup_rx_burst(void) @@ -2205,60 +2225,63 @@ test_activebackup_rx_burst(void) int i, j, burst_size = 17; - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ACTIVE_BACKUP, 0, - TEST_ACTIVE_BACKUP_RX_BURST_SLAVE_COUNT, 1), - "Failed to initialize bonded device with slaves"); + TEST_ACTIVE_BACKUP_RX_BURST_MEMBER_COUNT, 1), + "Failed to initialize bonding device with members"); - primary_port = rte_eth_bond_primary_get(test_params->bonded_port_id); + primary_port = rte_eth_bond_primary_get(test_params->bonding_port_id); TEST_ASSERT(primary_port >= 0, - "failed to get primary slave for bonded port (%d)", - test_params->bonded_port_id); + "failed to get primary member for bonding port (%d)", + test_params->bonding_port_id); - for (i = 0; i < test_params->bonded_slave_count; i++) { + for (i = 0; i < test_params->bonding_member_count; i++) { /* Generate test bursts of packets to transmit */ TEST_ASSERT_EQUAL(generate_test_burst( &gen_pkt_burst[0], burst_size, 0, 1, 0, 0, 0), burst_size, "burst generation failed"); - /* Add rx data to slave */ - virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[i], + /* Add rx data to member */ + virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[i], &gen_pkt_burst[0], burst_size); - /* Call rx burst on bonded device */ - TEST_ASSERT_EQUAL(rte_eth_rx_burst(test_params->bonded_port_id, 0, + /* Call rx burst on bonding device */ + TEST_ASSERT_EQUAL(rte_eth_rx_burst(test_params->bonding_port_id, 0, &rx_pkt_burst[0], MAX_PKT_BURST), burst_size, "rte_eth_rx_burst failed"); - if (test_params->slave_port_ids[i] == primary_port) { - /* Verify bonded device rx count */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + if (test_params->member_port_ids[i] == primary_port) { + /* Verify bonding device rx count */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size, - "Bonded Port (%d) ipackets value (%u) not as expected (%d)", - test_params->bonded_port_id, + "Bonding Port (%d) ipackets value (%u) not as expected (%d)", + test_params->bonding_port_id, (unsigned int)port_stats.ipackets, burst_size); - /* Verify bonded slave devices rx count */ - for (j = 0; j < test_params->bonded_slave_count; j++) { - rte_eth_stats_get(test_params->slave_port_ids[j], &port_stats); + /* Verify bonding member devices rx count */ + for (j = 0; j < test_params->bonding_member_count; j++) { + rte_eth_stats_get(test_params->member_port_ids[j], &port_stats); if (i == j) { TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size, - "Slave Port (%d) ipackets value (%u) not as " - "expected (%d)", test_params->slave_port_ids[i], - (unsigned int)port_stats.ipackets, burst_size); + "Member Port (%d) ipackets value (%u) not as " + "expected (%d)", + test_params->member_port_ids[i], + (unsigned int)port_stats.ipackets, + burst_size); } else { TEST_ASSERT_EQUAL(port_stats.ipackets, 0, - "Slave Port (%d) ipackets value (%u) not as " - "expected (%d)\n", test_params->slave_port_ids[i], + "Member Port (%d) ipackets value (%u) not as " + "expected (%d)\n", + test_params->member_port_ids[i], 
(unsigned int)port_stats.ipackets, 0); } } } else { - for (j = 0; j < test_params->bonded_slave_count; j++) { - rte_eth_stats_get(test_params->slave_port_ids[j], &port_stats); + for (j = 0; j < test_params->bonding_member_count; j++) { + rte_eth_stats_get(test_params->member_port_ids[j], &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, 0, - "Slave Port (%d) ipackets value (%u) not as expected " - "(%d)", test_params->slave_port_ids[i], + "Member Port (%d) ipackets value (%u) not as expected " + "(%d)", test_params->member_port_ids[i], (unsigned int)port_stats.ipackets, 0); } } @@ -2271,12 +2294,12 @@ test_activebackup_rx_burst(void) } } - /* reset bonded device stats */ - rte_eth_stats_reset(test_params->bonded_port_id); + /* reset bonding device stats */ + rte_eth_stats_reset(test_params->bonding_port_id); } - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -2285,59 +2308,59 @@ test_activebackup_verify_promiscuous_enable_disable(void) int i, primary_port, promiscuous_en; int ret; - /* Initialize bonded device with 4 slaves in round robin mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 4 members in round robin mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ACTIVE_BACKUP, 0, 4, 1), - "Failed to initialize bonded device with slaves"); + "Failed to initialize bonding device with members"); - primary_port = rte_eth_bond_primary_get(test_params->bonded_port_id); + primary_port = rte_eth_bond_primary_get(test_params->bonding_port_id); TEST_ASSERT(primary_port >= 0, - "failed to get primary slave for bonded port (%d)", - test_params->bonded_port_id); + "failed to get primary member for bonding port (%d)", + test_params->bonding_port_id); - ret = rte_eth_promiscuous_enable(test_params->bonded_port_id); + ret = rte_eth_promiscuous_enable(test_params->bonding_port_id); TEST_ASSERT_SUCCESS(ret, "Failed to enable promiscuous mode for port %d: %s", - test_params->bonded_port_id, rte_strerror(-ret)); + test_params->bonding_port_id, rte_strerror(-ret)); - TEST_ASSERT_EQUAL(rte_eth_promiscuous_get(test_params->bonded_port_id), 1, + TEST_ASSERT_EQUAL(rte_eth_promiscuous_get(test_params->bonding_port_id), 1, "Port (%d) promiscuous mode not enabled", - test_params->bonded_port_id); + test_params->bonding_port_id); - for (i = 0; i < test_params->bonded_slave_count; i++) { + for (i = 0; i < test_params->bonding_member_count; i++) { promiscuous_en = rte_eth_promiscuous_get( - test_params->slave_port_ids[i]); - if (primary_port == test_params->slave_port_ids[i]) { + test_params->member_port_ids[i]); + if (primary_port == test_params->member_port_ids[i]) { TEST_ASSERT_EQUAL(promiscuous_en, 1, - "slave port (%d) promiscuous mode not enabled", - test_params->slave_port_ids[i]); + "member port (%d) promiscuous mode not enabled", + test_params->member_port_ids[i]); } else { TEST_ASSERT_EQUAL(promiscuous_en, 0, - "slave port (%d) promiscuous mode enabled", - test_params->slave_port_ids[i]); + "member port (%d) promiscuous mode enabled", + test_params->member_port_ids[i]); } } - ret = rte_eth_promiscuous_disable(test_params->bonded_port_id); + ret = rte_eth_promiscuous_disable(test_params->bonding_port_id); TEST_ASSERT_SUCCESS(ret, "Failed to disable promiscuous mode for port %d: %s", - test_params->bonded_port_id, rte_strerror(-ret)); + 
test_params->bonding_port_id, rte_strerror(-ret)); - TEST_ASSERT_EQUAL(rte_eth_promiscuous_get(test_params->bonded_port_id), 0, + TEST_ASSERT_EQUAL(rte_eth_promiscuous_get(test_params->bonding_port_id), 0, "Port (%d) promiscuous mode not disabled\n", - test_params->bonded_port_id); + test_params->bonding_port_id); - for (i = 0; i < test_params->bonded_slave_count; i++) { + for (i = 0; i < test_params->bonding_member_count; i++) { promiscuous_en = rte_eth_promiscuous_get( - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); TEST_ASSERT_EQUAL(promiscuous_en, 0, - "slave port (%d) promiscuous mode not disabled\n", - test_params->slave_port_ids[i]); + "member port (%d) promiscuous mode not disabled\n", + test_params->member_port_ids[i]); } - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -2346,152 +2369,156 @@ test_activebackup_verify_mac_assignment(void) struct rte_ether_addr read_mac_addr; struct rte_ether_addr expected_mac_addr_0, expected_mac_addr_1; - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &expected_mac_addr_0), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], + &expected_mac_addr_0), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &expected_mac_addr_1), + test_params->member_port_ids[0]); + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], + &expected_mac_addr_1), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); - /* Initialize bonded device with 2 slaves in active backup mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 2 members in active backup mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ACTIVE_BACKUP, 0, 2, 1), - "Failed to initialize bonded device with slaves"); + "Failed to initialize bonding device with members"); - /* Verify that bonded MACs is that of first slave and that the other slave + /* Verify that bonding MACs is that of first member and that the other member * MAC hasn't been changed */ - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); + test_params->bonding_port_id); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of primary port", - test_params->bonded_port_id); + "bonding port (%d) mac address not set to that of primary port", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[0]); + "member port (%d) mac address not set to that of primary port", + test_params->member_port_ids[0]); - 
TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_1, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not as expected", - test_params->slave_port_ids[1]); + "member port (%d) mac address not as expected", + test_params->member_port_ids[1]); /* change primary and verify that MAC addresses haven't changed */ - TEST_ASSERT_EQUAL(rte_eth_bond_primary_set(test_params->bonded_port_id, - test_params->slave_port_ids[1]), 0, - "Failed to set bonded port (%d) primary port to (%d)", - test_params->bonded_port_id, test_params->slave_port_ids[1]); + TEST_ASSERT_EQUAL(rte_eth_bond_primary_set(test_params->bonding_port_id, + test_params->member_port_ids[1]), 0, + "Failed to set bonding port (%d) primary port to (%d)", + test_params->bonding_port_id, test_params->member_port_ids[1]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); + test_params->bonding_port_id); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of primary port", - test_params->bonded_port_id); + "bonding port (%d) mac address not set to that of primary port", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[0]); + "member port (%d) mac address not set to that of primary port", + test_params->member_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_1, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not as expected", - test_params->slave_port_ids[1]); + "member port (%d) mac address not as expected", + test_params->member_port_ids[1]); - /* stop / start bonded device and verify that primary MAC address is - * propagated to bonded device and slaves */ + /* + * stop / start bonding device and verify that primary MAC address is + * propagated to bonding device and members. 
+ */ - TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonded_port_id), - "Failed to stop bonded port %u", - test_params->bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonding_port_id), + "Failed to stop bonding port %u", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonded_port_id), + TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonding_port_id), "Failed to start device"); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); + test_params->bonding_port_id); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_1, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of primary port", - test_params->bonded_port_id); + "bonding port (%d) mac address not set to that of primary port", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not as expected", - test_params->slave_port_ids[0]); + "member port (%d) mac address not as expected", + test_params->member_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_1, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[1]); + "member port (%d) mac address not set to that of primary port", + test_params->member_port_ids[1]); /* Set explicit MAC address */ TEST_ASSERT_SUCCESS(rte_eth_bond_mac_address_set( - test_params->bonded_port_id, - (struct rte_ether_addr *)bonded_mac), + test_params->bonding_port_id, + (struct rte_ether_addr *)bonding_mac), "failed to set MAC address"); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); - TEST_ASSERT_SUCCESS(memcmp(&bonded_mac, &read_mac_addr, + test_params->bonding_port_id); + TEST_ASSERT_SUCCESS(memcmp(&bonding_mac, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of bonded port", - test_params->bonded_port_id); + "bonding port (%d) mac address not set to that of bonding port", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not as expected", - test_params->slave_port_ids[0]); + "member port (%d) mac address not as expected", + 
test_params->member_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); - TEST_ASSERT_SUCCESS(memcmp(&bonded_mac, &read_mac_addr, + test_params->member_port_ids[1]); + TEST_ASSERT_SUCCESS(memcmp(&bonding_mac, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of bonded port", - test_params->slave_port_ids[1]); + "member port (%d) mac address not set to that of bonding port", + test_params->member_port_ids[1]); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int -test_activebackup_verify_slave_link_status_change_failover(void) +test_activebackup_verify_member_link_status_change_failover(void) { - struct rte_mbuf *pkt_burst[TEST_ACTIVE_BACKUP_RX_BURST_SLAVE_COUNT][MAX_PKT_BURST]; + struct rte_mbuf *pkt_burst[TEST_ACTIVE_BACKUP_RX_BURST_MEMBER_COUNT][MAX_PKT_BURST]; struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL }; struct rte_eth_stats port_stats; - uint16_t slaves[RTE_MAX_ETHPORTS]; + uint16_t members[RTE_MAX_ETHPORTS]; - int i, burst_size, slave_count, primary_port; + int i, burst_size, member_count, primary_port; burst_size = 21; @@ -2502,130 +2529,130 @@ test_activebackup_verify_slave_link_status_change_failover(void) &pkt_burst[0][0], burst_size, 0, 1, 0, 0, 0), burst_size, "generate_test_burst failed"); - /* Initialize bonded device with 4 slaves in round robin mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 4 members in round robin mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ACTIVE_BACKUP, 0, - TEST_ACTIVE_BACKUP_RX_BURST_SLAVE_COUNT, 1), - "Failed to initialize bonded device with slaves"); + TEST_ACTIVE_BACKUP_RX_BURST_MEMBER_COUNT, 1), + "Failed to initialize bonding device with members"); - /* Verify Current Slaves Count /Active Slave Count is */ - slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, slaves, + /* Verify Current Members Count /Active Member Count is */ + member_count = rte_eth_bond_members_get(test_params->bonding_port_id, members, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, 4, - "Number of slaves (%d) is not as expected (%d).", - slave_count, 4); - - slave_count = rte_eth_bond_active_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, 4, - "Number of active slaves (%d) is not as expected (%d).", - slave_count, 4); - - primary_port = rte_eth_bond_primary_get(test_params->bonded_port_id); - TEST_ASSERT_EQUAL(primary_port, test_params->slave_port_ids[0], + TEST_ASSERT_EQUAL(member_count, 4, + "Number of members (%d) is not as expected (%d).", + member_count, 4); + + member_count = rte_eth_bond_active_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(member_count, 4, + "Number of active members (%d) is not as expected (%d).", + member_count, 4); + + primary_port = rte_eth_bond_primary_get(test_params->bonding_port_id); + TEST_ASSERT_EQUAL(primary_port, test_params->member_port_ids[0], "Primary port not as expected"); - /* Bring 2 slaves down and verify active slave count */ + /* Bring 2 members down and verify active member count */ 
virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[1], 0); + test_params->member_port_ids[1], 0); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[3], 0); + test_params->member_port_ids[3], 0); - TEST_ASSERT_EQUAL(rte_eth_bond_active_slaves_get( - test_params->bonded_port_id, slaves, RTE_MAX_ETHPORTS), 2, - "Number of active slaves (%d) is not as expected (%d).", - slave_count, 2); + TEST_ASSERT_EQUAL(rte_eth_bond_active_members_get( + test_params->bonding_port_id, members, RTE_MAX_ETHPORTS), 2, + "Number of active members (%d) is not as expected (%d).", + member_count, 2); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[1], 1); + test_params->member_port_ids[1], 1); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[3], 1); + test_params->member_port_ids[3], 1); - /* Bring primary port down, verify that active slave count is 3 and primary + /* Bring primary port down, verify that active member count is 3 and primary * has changed */ virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[0], 0); + test_params->member_port_ids[0], 0); - TEST_ASSERT_EQUAL(rte_eth_bond_active_slaves_get( - test_params->bonded_port_id, slaves, RTE_MAX_ETHPORTS), + TEST_ASSERT_EQUAL(rte_eth_bond_active_members_get( + test_params->bonding_port_id, members, RTE_MAX_ETHPORTS), 3, - "Number of active slaves (%d) is not as expected (%d).", - slave_count, 3); + "Number of active members (%d) is not as expected (%d).", + member_count, 3); - primary_port = rte_eth_bond_primary_get(test_params->bonded_port_id); - TEST_ASSERT_EQUAL(primary_port, test_params->slave_port_ids[2], + primary_port = rte_eth_bond_primary_get(test_params->bonding_port_id); + TEST_ASSERT_EQUAL(primary_port, test_params->member_port_ids[2], "Primary port not as expected"); - /* Verify that pkts are sent on new primary slave */ + /* Verify that pkts are sent on new primary member */ TEST_ASSERT_EQUAL(rte_eth_tx_burst( - test_params->bonded_port_id, 0, &pkt_burst[0][0], + test_params->bonding_port_id, 0, &pkt_burst[0][0], burst_size), burst_size, "rte_eth_tx_burst failed"); - rte_eth_stats_get(test_params->slave_port_ids[2], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[2], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)burst_size, "(%d) port_stats.opackets not as expected", - test_params->slave_port_ids[2]); + test_params->member_port_ids[2]); - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, 0, "(%d) port_stats.opackets not as expected\n", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); - rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[1], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, 0, "(%d) port_stats.opackets not as expected\n", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); - rte_eth_stats_get(test_params->slave_port_ids[3], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[3], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, 0, "(%d) port_stats.opackets not as expected\n", - test_params->slave_port_ids[3]); + test_params->member_port_ids[3]); /* Generate packet burst for testing */ - for (i = 0; i < TEST_ACTIVE_BACKUP_RX_BURST_SLAVE_COUNT; i++) { + for (i = 0; i < TEST_ACTIVE_BACKUP_RX_BURST_MEMBER_COUNT; i++) { 
TEST_ASSERT_EQUAL(generate_test_burst( &pkt_burst[i][0], burst_size, 0, 1, 0, 0, 0), burst_size, "generate_test_burst failed"); virtual_ethdev_add_mbufs_to_rx_queue( - test_params->slave_port_ids[i], &pkt_burst[i][0], burst_size); + test_params->member_port_ids[i], &pkt_burst[i][0], burst_size); } TEST_ASSERT_EQUAL(rte_eth_rx_burst( - test_params->bonded_port_id, 0, rx_pkt_burst, MAX_PKT_BURST), + test_params->bonding_port_id, 0, rx_pkt_burst, MAX_PKT_BURST), burst_size, "rte_eth_rx_burst\n"); - /* Verify bonded device rx count */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding device rx count */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size, "(%d) port_stats.ipackets not as expected", - test_params->bonded_port_id); + test_params->bonding_port_id); - rte_eth_stats_get(test_params->slave_port_ids[2], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[2], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)burst_size, "(%d) port_stats.opackets not as expected", - test_params->slave_port_ids[2]); + test_params->member_port_ids[2]); - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, 0, "(%d) port_stats.opackets not as expected", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); - rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[1], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, 0, "(%d) port_stats.opackets not as expected", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); - rte_eth_stats_get(test_params->slave_port_ids[3], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[3], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, 0, "(%d) port_stats.opackets not as expected", - test_params->slave_port_ids[3]); + test_params->member_port_ids[3]); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } /** Balance Mode Tests */ @@ -2633,43 +2660,43 @@ test_activebackup_verify_slave_link_status_change_failover(void) static int test_balance_xmit_policy_configuration(void) { - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_ACTIVE_BACKUP, 0, 2, 1), - "Failed to initialize_bonded_device_with_slaves."); + "Failed to initialize_bonding_device_with_members."); /* Invalid port id */ TEST_ASSERT_FAIL(rte_eth_bond_xmit_policy_set( INVALID_PORT_ID, BALANCE_XMIT_POLICY_LAYER2), "Expected call to failed as invalid port specified."); - /* Set xmit policy on non bonded device */ + /* Set xmit policy on non bonding device */ TEST_ASSERT_FAIL(rte_eth_bond_xmit_policy_set( - test_params->slave_port_ids[0], BALANCE_XMIT_POLICY_LAYER2), + test_params->member_port_ids[0], BALANCE_XMIT_POLICY_LAYER2), "Expected call to failed as invalid port specified."); TEST_ASSERT_SUCCESS(rte_eth_bond_xmit_policy_set( - test_params->bonded_port_id, BALANCE_XMIT_POLICY_LAYER2), + test_params->bonding_port_id, BALANCE_XMIT_POLICY_LAYER2), "Failed to set balance xmit policy."); - TEST_ASSERT_EQUAL(rte_eth_bond_xmit_policy_get(test_params->bonded_port_id), + 
TEST_ASSERT_EQUAL(rte_eth_bond_xmit_policy_get(test_params->bonding_port_id), BALANCE_XMIT_POLICY_LAYER2, "balance xmit policy not as expected."); TEST_ASSERT_SUCCESS(rte_eth_bond_xmit_policy_set( - test_params->bonded_port_id, BALANCE_XMIT_POLICY_LAYER23), + test_params->bonding_port_id, BALANCE_XMIT_POLICY_LAYER23), "Failed to set balance xmit policy."); - TEST_ASSERT_EQUAL(rte_eth_bond_xmit_policy_get(test_params->bonded_port_id), + TEST_ASSERT_EQUAL(rte_eth_bond_xmit_policy_get(test_params->bonding_port_id), BALANCE_XMIT_POLICY_LAYER23, "balance xmit policy not as expected."); TEST_ASSERT_SUCCESS(rte_eth_bond_xmit_policy_set( - test_params->bonded_port_id, BALANCE_XMIT_POLICY_LAYER34), + test_params->bonding_port_id, BALANCE_XMIT_POLICY_LAYER34), "Failed to set balance xmit policy."); - TEST_ASSERT_EQUAL(rte_eth_bond_xmit_policy_get(test_params->bonded_port_id), + TEST_ASSERT_EQUAL(rte_eth_bond_xmit_policy_get(test_params->bonding_port_id), BALANCE_XMIT_POLICY_LAYER34, "balance xmit policy not as expected."); @@ -2677,28 +2704,28 @@ test_balance_xmit_policy_configuration(void) TEST_ASSERT_FAIL(rte_eth_bond_xmit_policy_get(INVALID_PORT_ID), "Expected call to failed as invalid port specified."); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } -#define TEST_BALANCE_L2_TX_BURST_SLAVE_COUNT (2) +#define TEST_BALANCE_L2_TX_BURST_MEMBER_COUNT (2) static int test_balance_l2_tx_burst(void) { - struct rte_mbuf *pkts_burst[TEST_BALANCE_L2_TX_BURST_SLAVE_COUNT][MAX_PKT_BURST]; - int burst_size[TEST_BALANCE_L2_TX_BURST_SLAVE_COUNT] = { 10, 15 }; + struct rte_mbuf *pkts_burst[TEST_BALANCE_L2_TX_BURST_MEMBER_COUNT][MAX_PKT_BURST]; + int burst_size[TEST_BALANCE_L2_TX_BURST_MEMBER_COUNT] = { 10, 15 }; uint16_t pktlen; int i; struct rte_eth_stats port_stats; - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( - BONDING_MODE_BALANCE, 0, TEST_BALANCE_L2_TX_BURST_SLAVE_COUNT, 1), - "Failed to initialize_bonded_device_with_slaves."); + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( + BONDING_MODE_BALANCE, 0, TEST_BALANCE_L2_TX_BURST_MEMBER_COUNT, 1), + "Failed to initialize_bonding_device_with_members."); TEST_ASSERT_SUCCESS(rte_eth_bond_xmit_policy_set( - test_params->bonded_port_id, BALANCE_XMIT_POLICY_LAYER2), + test_params->bonding_port_id, BALANCE_XMIT_POLICY_LAYER2), "Failed to set balance xmit policy."); initialize_eth_header(test_params->pkt_eth_hdr, @@ -2729,49 +2756,49 @@ test_balance_l2_tx_burst(void) PACKET_BURST_GEN_PKT_LEN, 1), burst_size[1], "failed to generate packet burst"); - /* Send burst 1 on bonded port */ - for (i = 0; i < TEST_BALANCE_L2_TX_BURST_SLAVE_COUNT; i++) { - TEST_ASSERT_EQUAL(rte_eth_tx_burst(test_params->bonded_port_id, 0, + /* Send burst 1 on bonding port */ + for (i = 0; i < TEST_BALANCE_L2_TX_BURST_MEMBER_COUNT; i++) { + TEST_ASSERT_EQUAL(rte_eth_tx_burst(test_params->bonding_port_id, 0, &pkts_burst[i][0], burst_size[i]), burst_size[i], "Failed to transmit packet burst"); } - /* Verify bonded port tx stats */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding port tx stats */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)(burst_size[0] + burst_size[1]), - "Bonded Port (%d) opackets value (%u) not as expected (%d)", - test_params->bonded_port_id, (unsigned int)port_stats.opackets, + "Bonding 
Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonding_port_id, (unsigned int)port_stats.opackets, burst_size[0] + burst_size[1]); - /* Verify slave ports tx stats */ - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + /* Verify member ports tx stats */ + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)burst_size[0], - "Slave Port (%d) opackets value (%u) not as expected (%d)", - test_params->slave_port_ids[0], (unsigned int)port_stats.opackets, + "Member Port (%d) opackets value (%u) not as expected (%d)", + test_params->member_port_ids[0], (unsigned int)port_stats.opackets, burst_size[0]); - rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[1], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)burst_size[1], - "Slave Port (%d) opackets value (%u) not as expected (%d)\n", - test_params->slave_port_ids[1], (unsigned int)port_stats.opackets, + "Member Port (%d) opackets value (%u) not as expected (%d)\n", + test_params->member_port_ids[1], (unsigned int)port_stats.opackets, burst_size[1]); - /* Put all slaves down and try and transmit */ - for (i = 0; i < test_params->bonded_slave_count; i++) { + /* Put all members down and try and transmit */ + for (i = 0; i < test_params->bonding_member_count; i++) { virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[i], 0); + test_params->member_port_ids[i], 0); } - /* Send burst on bonded port */ + /* Send burst on bonding port */ TEST_ASSERT_EQUAL(rte_eth_tx_burst( - test_params->bonded_port_id, 0, &pkts_burst[0][0], burst_size[0]), + test_params->bonding_port_id, 0, &pkts_burst[0][0], burst_size[0]), 0, "Expected zero packet"); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -2785,12 +2812,12 @@ balance_l23_tx_burst(uint8_t vlan_enabled, uint8_t ipv4, struct rte_eth_stats port_stats; - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_BALANCE, 0, 2, 1), - "Failed to initialize_bonded_device_with_slaves."); + "Failed to initialize_bonding_device_with_members."); TEST_ASSERT_SUCCESS(rte_eth_bond_xmit_policy_set( - test_params->bonded_port_id, BALANCE_XMIT_POLICY_LAYER23), + test_params->bonding_port_id, BALANCE_XMIT_POLICY_LAYER23), "Failed to set balance xmit policy."); burst_size_1 = 20; @@ -2808,51 +2835,51 @@ balance_l23_tx_burst(uint8_t vlan_enabled, uint8_t ipv4, toggle_mac_addr, toggle_ip_addr, 0), burst_size_2, "failed to generate packet burst"); - /* Send burst 1 on bonded port */ - nb_tx_1 = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst_1, + /* Send burst 1 on bonding port */ + nb_tx_1 = rte_eth_tx_burst(test_params->bonding_port_id, 0, pkts_burst_1, burst_size_1); TEST_ASSERT_EQUAL(nb_tx_1, burst_size_1, "tx burst failed"); - /* Send burst 2 on bonded port */ - nb_tx_2 = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst_2, + /* Send burst 2 on bonding port */ + nb_tx_2 = rte_eth_tx_burst(test_params->bonding_port_id, 0, pkts_burst_2, burst_size_2); TEST_ASSERT_EQUAL(nb_tx_2, burst_size_2, "tx burst failed"); - /* Verify bonded port tx stats */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding port tx stats */ + 
rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)(nb_tx_1 + nb_tx_2), - "Bonded Port (%d) opackets value (%u) not as expected (%d)", - test_params->bonded_port_id, (unsigned int)port_stats.opackets, + "Bonding Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonding_port_id, (unsigned int)port_stats.opackets, nb_tx_1 + nb_tx_2); - /* Verify slave ports tx stats */ - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + /* Verify member ports tx stats */ + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)nb_tx_1, - "Slave Port (%d) opackets value (%u) not as expected (%d)", - test_params->slave_port_ids[0], (unsigned int)port_stats.opackets, + "Member Port (%d) opackets value (%u) not as expected (%d)", + test_params->member_port_ids[0], (unsigned int)port_stats.opackets, nb_tx_1); - rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[1], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)nb_tx_2, - "Slave Port (%d) opackets value (%u) not as expected (%d)", - test_params->slave_port_ids[1], (unsigned int)port_stats.opackets, + "Member Port (%d) opackets value (%u) not as expected (%d)", + test_params->member_port_ids[1], (unsigned int)port_stats.opackets, nb_tx_2); - /* Put all slaves down and try and transmit */ - for (i = 0; i < test_params->bonded_slave_count; i++) { + /* Put all members down and try and transmit */ + for (i = 0; i < test_params->bonding_member_count; i++) { virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[i], 0); + test_params->member_port_ids[i], 0); } - /* Send burst on bonded port */ + /* Send burst on bonding port */ TEST_ASSERT_EQUAL(rte_eth_tx_burst( - test_params->bonded_port_id, 0, pkts_burst_1, + test_params->bonding_port_id, 0, pkts_burst_1, burst_size_1), 0, "Expected zero packet"); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -2897,12 +2924,12 @@ balance_l34_tx_burst(uint8_t vlan_enabled, uint8_t ipv4, struct rte_eth_stats port_stats; - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_BALANCE, 0, 2, 1), - "Failed to initialize_bonded_device_with_slaves."); + "Failed to initialize_bonding_device_with_members."); TEST_ASSERT_SUCCESS(rte_eth_bond_xmit_policy_set( - test_params->bonded_port_id, BALANCE_XMIT_POLICY_LAYER34), + test_params->bonding_port_id, BALANCE_XMIT_POLICY_LAYER34), "Failed to set balance xmit policy."); burst_size_1 = 20; @@ -2920,51 +2947,51 @@ balance_l34_tx_burst(uint8_t vlan_enabled, uint8_t ipv4, vlan_enabled, ipv4, toggle_mac_addr, toggle_ip_addr, toggle_udp_port), burst_size_2, "failed to generate burst"); - /* Send burst 1 on bonded port */ - nb_tx_1 = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst_1, + /* Send burst 1 on bonding port */ + nb_tx_1 = rte_eth_tx_burst(test_params->bonding_port_id, 0, pkts_burst_1, burst_size_1); TEST_ASSERT_EQUAL(nb_tx_1, burst_size_1, "tx burst failed"); - /* Send burst 2 on bonded port */ - nb_tx_2 = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst_2, + /* Send burst 2 on bonding port */ + nb_tx_2 = rte_eth_tx_burst(test_params->bonding_port_id, 0, pkts_burst_2, 
burst_size_2); TEST_ASSERT_EQUAL(nb_tx_2, burst_size_2, "tx burst failed"); - /* Verify bonded port tx stats */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding port tx stats */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)(nb_tx_1 + nb_tx_2), - "Bonded Port (%d) opackets value (%u) not as expected (%d)", - test_params->bonded_port_id, (unsigned int)port_stats.opackets, + "Bonding Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonding_port_id, (unsigned int)port_stats.opackets, nb_tx_1 + nb_tx_2); - /* Verify slave ports tx stats */ - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + /* Verify member ports tx stats */ + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)nb_tx_1, - "Slave Port (%d) opackets value (%u) not as expected (%d)", - test_params->slave_port_ids[0], (unsigned int)port_stats.opackets, + "Member Port (%d) opackets value (%u) not as expected (%d)", + test_params->member_port_ids[0], (unsigned int)port_stats.opackets, nb_tx_1); - rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[1], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)nb_tx_2, - "Slave Port (%d) opackets value (%u) not as expected (%d)", - test_params->slave_port_ids[1], (unsigned int)port_stats.opackets, + "Member Port (%d) opackets value (%u) not as expected (%d)", + test_params->member_port_ids[1], (unsigned int)port_stats.opackets, nb_tx_2); - /* Put all slaves down and try and transmit */ - for (i = 0; i < test_params->bonded_slave_count; i++) { + /* Put all members down and try and transmit */ + for (i = 0; i < test_params->bonding_member_count; i++) { virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[i], 0); + test_params->member_port_ids[i], 0); } - /* Send burst on bonded port */ + /* Send burst on bonding port */ TEST_ASSERT_EQUAL(rte_eth_tx_burst( - test_params->bonded_port_id, 0, pkts_burst_1, + test_params->bonding_port_id, 0, pkts_burst_1, burst_size_1), 0, "Expected zero packet"); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -3003,217 +3030,219 @@ test_balance_l34_tx_burst_ipv6_toggle_udp_port(void) return balance_l34_tx_burst(0, 0, 0, 0, 1); } -#define TEST_BAL_SLAVE_TX_FAIL_SLAVE_COUNT (2) -#define TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 (40) -#define TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2 (20) -#define TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT (25) -#define TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX (0) +#define TEST_BAL_MEMBER_TX_FAIL_MEMBER_COUNT (2) +#define TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_1 (40) +#define TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_2 (20) +#define TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT (25) +#define TEST_BAL_MEMBER_TX_FAIL_FAILING_MEMBER_IDX (0) static int -test_balance_tx_burst_slave_tx_fail(void) +test_balance_tx_burst_member_tx_fail(void) { - struct rte_mbuf *pkts_burst_1[TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1]; - struct rte_mbuf *pkts_burst_2[TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2]; + struct rte_mbuf *pkts_burst_1[TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_1]; + struct rte_mbuf *pkts_burst_2[TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_2]; - struct rte_mbuf *expected_fail_pkts[TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT]; + struct rte_mbuf 
*expected_fail_pkts[TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT]; struct rte_eth_stats port_stats; int i, first_tx_fail_idx, tx_count_1, tx_count_2; - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_BALANCE, 0, - TEST_BAL_SLAVE_TX_FAIL_SLAVE_COUNT, 1), - "Failed to initialise bonded device"); + TEST_BAL_MEMBER_TX_FAIL_MEMBER_COUNT, 1), + "Failed to initialise bonding device"); TEST_ASSERT_SUCCESS(rte_eth_bond_xmit_policy_set( - test_params->bonded_port_id, BALANCE_XMIT_POLICY_LAYER2), + test_params->bonding_port_id, BALANCE_XMIT_POLICY_LAYER2), "Failed to set balance xmit policy."); /* Generate test bursts for transmission */ TEST_ASSERT_EQUAL(generate_test_burst(pkts_burst_1, - TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1, 0, 0, 0, 0, 0), - TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1, + TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_1, 0, 0, 0, 0, 0), + TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_1, "Failed to generate test packet burst 1"); - first_tx_fail_idx = TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - - TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT; + first_tx_fail_idx = TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT; /* copy mbuf references for expected transmission failures */ - for (i = 0; i < TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT; i++) + for (i = 0; i < TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT; i++) expected_fail_pkts[i] = pkts_burst_1[i + first_tx_fail_idx]; TEST_ASSERT_EQUAL(generate_test_burst(pkts_burst_2, - TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2, 0, 0, 1, 0, 0), - TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2, + TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_2, 0, 0, 1, 0, 0), + TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_2, "Failed to generate test packet burst 2"); - /* Set virtual slave TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX to only fail - * transmission of TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT packets of burst */ + /* + * Set virtual member TEST_BAL_MEMBER_TX_FAIL_FAILING_MEMBER_IDX to only fail + * transmission of TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT packets of burst. 
+ */ virtual_ethdev_tx_burst_fn_set_success( - test_params->slave_port_ids[TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX], + test_params->member_port_ids[TEST_BAL_MEMBER_TX_FAIL_FAILING_MEMBER_IDX], 0); virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count( - test_params->slave_port_ids[TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX], - TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT); + test_params->member_port_ids[TEST_BAL_MEMBER_TX_FAIL_FAILING_MEMBER_IDX], + TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT); /* Transmit burst 1 */ - tx_count_1 = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst_1, - TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1); + tx_count_1 = rte_eth_tx_burst(test_params->bonding_port_id, 0, pkts_burst_1, + TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_1); - TEST_ASSERT_EQUAL(tx_count_1, TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - - TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT, + TEST_ASSERT_EQUAL(tx_count_1, TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT, "Transmitted (%d) packets, expected to transmit (%d) packets", - tx_count_1, TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - - TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT); + tx_count_1, TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT); /* Verify that failed packet are expected failed packets */ - for (i = 0; i < TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT; i++) { + for (i = 0; i < TEST_RR_MEMBER_TX_FAIL_PACKETS_COUNT; i++) { TEST_ASSERT_EQUAL(expected_fail_pkts[i], pkts_burst_1[i + tx_count_1], "expected mbuf (%d) pointer %p not expected pointer %p", i, expected_fail_pkts[i], pkts_burst_1[i + tx_count_1]); } /* Transmit burst 2 */ - tx_count_2 = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst_2, - TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2); + tx_count_2 = rte_eth_tx_burst(test_params->bonding_port_id, 0, pkts_burst_2, + TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_2); - TEST_ASSERT_EQUAL(tx_count_2, TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2, + TEST_ASSERT_EQUAL(tx_count_2, TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_2, "Transmitted (%d) packets, expected to transmit (%d) packets", - tx_count_2, TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2); + tx_count_2, TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_2); - /* Verify bonded port tx stats */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding port tx stats */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, - (uint64_t)((TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - - TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT) + - TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2), - "Bonded Port (%d) opackets value (%u) not as expected (%d)", - test_params->bonded_port_id, (unsigned int)port_stats.opackets, - (TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - - TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT) + - TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2); + (uint64_t)((TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT) + + TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_2), + "Bonding Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonding_port_id, (unsigned int)port_stats.opackets, + (TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT) + + TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_2); - /* Verify slave ports tx stats */ + /* Verify member ports tx stats */ - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t) - TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - - TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT, - "Slave Port (%d) opackets value (%u) not as 
expected (%d)", - test_params->slave_port_ids[0], + TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT, + "Member Port (%d) opackets value (%u) not as expected (%d)", + test_params->member_port_ids[0], (unsigned int)port_stats.opackets, - TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - - TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT); + TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT); - rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[1], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, - (uint64_t)TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2, - "Slave Port (%d) opackets value (%u) not as expected (%d)", - test_params->slave_port_ids[1], + (uint64_t)TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_2, + "Member Port (%d) opackets value (%u) not as expected (%d)", + test_params->member_port_ids[1], (unsigned int)port_stats.opackets, - TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2); + TEST_BAL_MEMBER_TX_FAIL_BURST_SIZE_2); /* Verify that all mbufs have a ref value of zero */ TEST_ASSERT_SUCCESS(verify_mbufs_ref_count(&pkts_burst_1[tx_count_1], - TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT, 1), + TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT, 1), "mbufs refcnts not as expected"); free_mbufs(&pkts_burst_1[tx_count_1], - TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT); + TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } -#define TEST_BALANCE_RX_BURST_SLAVE_COUNT (3) +#define TEST_BALANCE_RX_BURST_MEMBER_COUNT (3) static int test_balance_rx_burst(void) { - struct rte_mbuf *gen_pkt_burst[TEST_BALANCE_RX_BURST_SLAVE_COUNT][MAX_PKT_BURST]; + struct rte_mbuf *gen_pkt_burst[TEST_BALANCE_RX_BURST_MEMBER_COUNT][MAX_PKT_BURST]; struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL }; struct rte_eth_stats port_stats; - int burst_size[TEST_BALANCE_RX_BURST_SLAVE_COUNT] = { 10, 5, 30 }; + int burst_size[TEST_BALANCE_RX_BURST_MEMBER_COUNT] = { 10, 5, 30 }; int i, j; memset(gen_pkt_burst, 0, sizeof(gen_pkt_burst)); - /* Initialize bonded device with 4 slaves in round robin mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 4 members in round robin mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_BALANCE, 0, 3, 1), - "Failed to initialise bonded device"); + "Failed to initialise bonding device"); /* Generate test bursts of packets to transmit */ - for (i = 0; i < TEST_BALANCE_RX_BURST_SLAVE_COUNT; i++) { + for (i = 0; i < TEST_BALANCE_RX_BURST_MEMBER_COUNT; i++) { TEST_ASSERT_EQUAL(generate_test_burst( &gen_pkt_burst[i][0], burst_size[i], 0, 0, 1, 0, 0), burst_size[i], "failed to generate packet burst"); } - /* Add rx data to slaves */ - for (i = 0; i < TEST_BALANCE_RX_BURST_SLAVE_COUNT; i++) { - virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[i], + /* Add rx data to members */ + for (i = 0; i < TEST_BALANCE_RX_BURST_MEMBER_COUNT; i++) { + virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[i], &gen_pkt_burst[i][0], burst_size[i]); } - /* Call rx burst on bonded device */ - /* Send burst on bonded port */ - TEST_ASSERT_EQUAL(rte_eth_rx_burst(test_params->bonded_port_id, 0, + /* Call rx burst on bonding device */ + /* Send burst on bonding port */ + TEST_ASSERT_EQUAL(rte_eth_rx_burst(test_params->bonding_port_id, 0, rx_pkt_burst, MAX_PKT_BURST), 
burst_size[0] + burst_size[1] + burst_size[2], "balance rx burst failed\n"); - /* Verify bonded device rx count */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding device rx count */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)(burst_size[0] + burst_size[1] + burst_size[2]), - "Bonded Port (%d) ipackets value (%u) not as expected (%d)", - test_params->bonded_port_id, (unsigned int)port_stats.ipackets, + "Bonding Port (%d) ipackets value (%u) not as expected (%d)", + test_params->bonding_port_id, (unsigned int)port_stats.ipackets, burst_size[0] + burst_size[1] + burst_size[2]); - /* Verify bonded slave devices rx counts */ - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + /* Verify bonding member devices rx counts */ + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size[0], - "Slave Port (%d) ipackets value (%u) not as expected (%d)", - test_params->slave_port_ids[0], + "Member Port (%d) ipackets value (%u) not as expected (%d)", + test_params->member_port_ids[0], (unsigned int)port_stats.ipackets, burst_size[0]); - rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[1], &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size[1], - "Slave Port (%d) ipackets value (%u) not as expected (%d)", - test_params->slave_port_ids[1], (unsigned int)port_stats.ipackets, + "Member Port (%d) ipackets value (%u) not as expected (%d)", + test_params->member_port_ids[1], (unsigned int)port_stats.ipackets, burst_size[1]); - rte_eth_stats_get(test_params->slave_port_ids[2], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[2], &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size[2], - "Slave Port (%d) ipackets value (%u) not as expected (%d)", - test_params->slave_port_ids[2], (unsigned int)port_stats.ipackets, + "Member Port (%d) ipackets value (%u) not as expected (%d)", + test_params->member_port_ids[2], (unsigned int)port_stats.ipackets, burst_size[2]); - rte_eth_stats_get(test_params->slave_port_ids[3], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[3], &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, 0, - "Slave Port (%d) ipackets value (%u) not as expected (%d)", - test_params->slave_port_ids[3], (unsigned int)port_stats.ipackets, + "Member Port (%d) ipackets value (%u) not as expected (%d)", + test_params->member_port_ids[3], (unsigned int)port_stats.ipackets, 0); /* free mbufs */ - for (i = 0; i < TEST_BALANCE_RX_BURST_SLAVE_COUNT; i++) { + for (i = 0; i < TEST_BALANCE_RX_BURST_MEMBER_COUNT; i++) { for (j = 0; j < MAX_PKT_BURST; j++) { if (gen_pkt_burst[i][j] != NULL) { rte_pktmbuf_free(gen_pkt_burst[i][j]); @@ -3222,8 +3251,8 @@ test_balance_rx_burst(void) } } - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -3232,45 +3261,45 @@ test_balance_verify_promiscuous_enable_disable(void) int i; int ret; - /* Initialize bonded device with 4 slaves in round robin mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 4 members in round robin mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_BALANCE, 0, 4, 1), - "Failed to initialise 
bonded device"); + "Failed to initialise bonding device"); - ret = rte_eth_promiscuous_enable(test_params->bonded_port_id); + ret = rte_eth_promiscuous_enable(test_params->bonding_port_id); TEST_ASSERT_SUCCESS(ret, "Failed to enable promiscuous mode for port %d: %s", - test_params->bonded_port_id, rte_strerror(-ret)); + test_params->bonding_port_id, rte_strerror(-ret)); - TEST_ASSERT_EQUAL(rte_eth_promiscuous_get(test_params->bonded_port_id), 1, + TEST_ASSERT_EQUAL(rte_eth_promiscuous_get(test_params->bonding_port_id), 1, "Port (%d) promiscuous mode not enabled", - test_params->bonded_port_id); + test_params->bonding_port_id); - for (i = 0; i < test_params->bonded_slave_count; i++) { + for (i = 0; i < test_params->bonding_member_count; i++) { TEST_ASSERT_EQUAL(rte_eth_promiscuous_get( - test_params->slave_port_ids[i]), 1, + test_params->member_port_ids[i]), 1, "Port (%d) promiscuous mode not enabled", - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); } - ret = rte_eth_promiscuous_disable(test_params->bonded_port_id); + ret = rte_eth_promiscuous_disable(test_params->bonding_port_id); TEST_ASSERT_SUCCESS(ret, "Failed to disable promiscuous mode for port %d: %s", - test_params->bonded_port_id, rte_strerror(-ret)); + test_params->bonding_port_id, rte_strerror(-ret)); - TEST_ASSERT_EQUAL(rte_eth_promiscuous_get(test_params->bonded_port_id), 0, + TEST_ASSERT_EQUAL(rte_eth_promiscuous_get(test_params->bonding_port_id), 0, "Port (%d) promiscuous mode not disabled", - test_params->bonded_port_id); + test_params->bonding_port_id); - for (i = 0; i < test_params->bonded_slave_count; i++) { + for (i = 0; i < test_params->bonding_member_count; i++) { TEST_ASSERT_EQUAL(rte_eth_promiscuous_get( - test_params->slave_port_ids[i]), 0, + test_params->member_port_ids[i]), 0, "Port (%d) promiscuous mode not disabled", - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); } - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -3279,193 +3308,199 @@ test_balance_verify_mac_assignment(void) struct rte_ether_addr read_mac_addr; struct rte_ether_addr expected_mac_addr_0, expected_mac_addr_1; - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &expected_mac_addr_0), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], + &expected_mac_addr_0), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &expected_mac_addr_1), + test_params->member_port_ids[0]); + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], + &expected_mac_addr_1), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); - /* Initialize bonded device with 2 slaves in active backup mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 2 members in active backup mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_BALANCE, 0, 2, 1), - "Failed to initialise bonded device"); + "Failed to initialise bonding device"); - /* Verify that bonded MACs is that of first slave and that the other slave + /* Verify that bonding MACs is that of first member and that the other member * MAC hasn't been changed */ - 
TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); + test_params->bonding_port_id); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of primary port", - test_params->bonded_port_id); + "bonding port (%d) mac address not set to that of primary port", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[0]); + "member port (%d) mac address not set to that of primary port", + test_params->member_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[1]); + "member port (%d) mac address not set to that of primary port", + test_params->member_port_ids[1]); /* change primary and verify that MAC addresses haven't changed */ - TEST_ASSERT_SUCCESS(rte_eth_bond_primary_set(test_params->bonded_port_id, - test_params->slave_port_ids[1]), - "Failed to set bonded port (%d) primary port to (%d)\n", - test_params->bonded_port_id, test_params->slave_port_ids[1]); + TEST_ASSERT_SUCCESS(rte_eth_bond_primary_set(test_params->bonding_port_id, + test_params->member_port_ids[1]), + "Failed to set bonding port (%d) primary port to (%d)\n", + test_params->bonding_port_id, test_params->member_port_ids[1]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); + test_params->bonding_port_id); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of primary port", - test_params->bonded_port_id); + "bonding port (%d) mac address not set to that of primary port", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[0]); + "member port (%d) mac address not set to that of primary port", + test_params->member_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &read_mac_addr), + 
TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[1]); + "member port (%d) mac address not set to that of primary port", + test_params->member_port_ids[1]); - /* stop / start bonded device and verify that primary MAC address is - * propagated to bonded device and slaves */ + /* + * stop / start bonding device and verify that primary MAC address is + * propagated to bonding device and members. + */ - TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonded_port_id), - "Failed to stop bonded port %u", - test_params->bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonding_port_id), + "Failed to stop bonding port %u", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonded_port_id), - "Failed to start bonded device"); + TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonding_port_id), + "Failed to start bonding device"); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); + test_params->bonding_port_id); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_1, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of primary port", - test_params->bonded_port_id); + "bonding port (%d) mac address not set to that of primary port", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_1, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[0]); + "member port (%d) mac address not set to that of primary port", + test_params->member_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_1, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[1]); + "member port (%d) mac address not set to that of primary port", + test_params->member_port_ids[1]); /* Set explicit MAC address */ TEST_ASSERT_SUCCESS(rte_eth_bond_mac_address_set( - test_params->bonded_port_id, - (struct rte_ether_addr *)bonded_mac), + test_params->bonding_port_id, + (struct rte_ether_addr *)bonding_mac), "failed to set MAC"); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); - TEST_ASSERT_SUCCESS(memcmp(&bonded_mac, &read_mac_addr, + test_params->bonding_port_id); + 
TEST_ASSERT_SUCCESS(memcmp(&bonding_mac, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of bonded port", - test_params->bonded_port_id); + "bonding port (%d) mac address not set to that of bonding port", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); - TEST_ASSERT_SUCCESS(memcmp(&bonded_mac, &read_mac_addr, + test_params->member_port_ids[0]); + TEST_ASSERT_SUCCESS(memcmp(&bonding_mac, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not as expected\n", - test_params->slave_port_ids[0]); + "member port (%d) mac address not as expected\n", + test_params->member_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); - TEST_ASSERT_SUCCESS(memcmp(&bonded_mac, &read_mac_addr, + test_params->member_port_ids[1]); + TEST_ASSERT_SUCCESS(memcmp(&bonding_mac, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of bonded port", - test_params->slave_port_ids[1]); + "member port (%d) mac address not set to that of bonding port", + test_params->member_port_ids[1]); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } -#define TEST_BALANCE_LINK_STATUS_SLAVE_COUNT (4) +#define TEST_BALANCE_LINK_STATUS_MEMBER_COUNT (4) static int -test_balance_verify_slave_link_status_change_behaviour(void) +test_balance_verify_member_link_status_change_behaviour(void) { - struct rte_mbuf *pkt_burst[TEST_BALANCE_LINK_STATUS_SLAVE_COUNT][MAX_PKT_BURST]; + struct rte_mbuf *pkt_burst[TEST_BALANCE_LINK_STATUS_MEMBER_COUNT][MAX_PKT_BURST]; struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL }; struct rte_eth_stats port_stats; - uint16_t slaves[RTE_MAX_ETHPORTS]; + uint16_t members[RTE_MAX_ETHPORTS]; - int i, burst_size, slave_count; + int i, burst_size, member_count; memset(pkt_burst, 0, sizeof(pkt_burst)); - /* Initialize bonded device with 4 slaves in round robin mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( - BONDING_MODE_BALANCE, 0, TEST_BALANCE_LINK_STATUS_SLAVE_COUNT, 1), - "Failed to initialise bonded device"); + /* Initialize bonding device with 4 members in round robin mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( + BONDING_MODE_BALANCE, 0, TEST_BALANCE_LINK_STATUS_MEMBER_COUNT, 1), + "Failed to initialise bonding device"); TEST_ASSERT_SUCCESS(rte_eth_bond_xmit_policy_set( - test_params->bonded_port_id, BALANCE_XMIT_POLICY_LAYER2), + test_params->bonding_port_id, BALANCE_XMIT_POLICY_LAYER2), "Failed to set balance xmit policy."); - /* Verify Current Slaves Count /Active Slave Count is */ - slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, slaves, + /* Verify Current Members Count /Active Member Count is */ + member_count = rte_eth_bond_members_get(test_params->bonding_port_id, members, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, TEST_BALANCE_LINK_STATUS_SLAVE_COUNT, - "Number of slaves (%d) is not as expected (%d).", - slave_count, 
TEST_BALANCE_LINK_STATUS_SLAVE_COUNT); + TEST_ASSERT_EQUAL(member_count, TEST_BALANCE_LINK_STATUS_MEMBER_COUNT, + "Number of members (%d) is not as expected (%d).", + member_count, TEST_BALANCE_LINK_STATUS_MEMBER_COUNT); - slave_count = rte_eth_bond_active_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, TEST_BALANCE_LINK_STATUS_SLAVE_COUNT, - "Number of active slaves (%d) is not as expected (%d).", - slave_count, TEST_BALANCE_LINK_STATUS_SLAVE_COUNT); + member_count = rte_eth_bond_active_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(member_count, TEST_BALANCE_LINK_STATUS_MEMBER_COUNT, + "Number of active members (%d) is not as expected (%d).", + member_count, TEST_BALANCE_LINK_STATUS_MEMBER_COUNT); - /* Set 2 slaves link status to down */ + /* Set 2 members link status to down */ virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[1], 0); + test_params->member_port_ids[1], 0); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[3], 0); + test_params->member_port_ids[3], 0); - TEST_ASSERT_EQUAL(rte_eth_bond_active_slaves_get( - test_params->bonded_port_id, slaves, RTE_MAX_ETHPORTS), 2, - "Number of active slaves (%d) is not as expected (%d).", - slave_count, 2); + TEST_ASSERT_EQUAL(rte_eth_bond_active_members_get( + test_params->bonding_port_id, members, RTE_MAX_ETHPORTS), 2, + "Number of active members (%d) is not as expected (%d).", + member_count, 2); - /* Send to sets of packet burst and verify that they are balanced across - * slaves */ + /* + * Send to sets of packet burst and verify that they are balanced across + * members. + */ burst_size = 21; TEST_ASSERT_EQUAL(generate_test_burst( @@ -3477,95 +3512,95 @@ test_balance_verify_slave_link_status_change_behaviour(void) "generate_test_burst failed"); TEST_ASSERT_EQUAL(rte_eth_tx_burst( - test_params->bonded_port_id, 0, &pkt_burst[0][0], burst_size), + test_params->bonding_port_id, 0, &pkt_burst[0][0], burst_size), burst_size, "rte_eth_tx_burst failed"); TEST_ASSERT_EQUAL(rte_eth_tx_burst( - test_params->bonded_port_id, 0, &pkt_burst[1][0], burst_size), + test_params->bonding_port_id, 0, &pkt_burst[1][0], burst_size), burst_size, "rte_eth_tx_burst failed"); - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)(burst_size + burst_size), "(%d) port_stats.opackets (%d) not as expected (%d).", - test_params->bonded_port_id, (int)port_stats.opackets, + test_params->bonding_port_id, (int)port_stats.opackets, burst_size + burst_size); - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)burst_size, "(%d) port_stats.opackets (%d) not as expected (%d).", - test_params->slave_port_ids[0], (int)port_stats.opackets, + test_params->member_port_ids[0], (int)port_stats.opackets, burst_size); - rte_eth_stats_get(test_params->slave_port_ids[2], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[2], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)burst_size, "(%d) port_stats.opackets (%d) not as expected (%d).", - test_params->slave_port_ids[2], (int)port_stats.opackets, + test_params->member_port_ids[2], (int)port_stats.opackets, burst_size); - /* verify that all packets get send on primary slave when no other slaves + /* verify 
that all packets get send on primary member when no other members * are available */ virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[2], 0); + test_params->member_port_ids[2], 0); - TEST_ASSERT_EQUAL(rte_eth_bond_active_slaves_get( - test_params->bonded_port_id, slaves, RTE_MAX_ETHPORTS), 1, - "Number of active slaves (%d) is not as expected (%d).", - slave_count, 1); + TEST_ASSERT_EQUAL(rte_eth_bond_active_members_get( + test_params->bonding_port_id, members, RTE_MAX_ETHPORTS), 1, + "Number of active members (%d) is not as expected (%d).", + member_count, 1); TEST_ASSERT_EQUAL(generate_test_burst( &pkt_burst[1][0], burst_size, 0, 1, 1, 0, 0), burst_size, "generate_test_burst failed"); TEST_ASSERT_EQUAL(rte_eth_tx_burst( - test_params->bonded_port_id, 0, &pkt_burst[1][0], burst_size), + test_params->bonding_port_id, 0, &pkt_burst[1][0], burst_size), burst_size, "rte_eth_tx_burst failed"); - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)(burst_size + burst_size + burst_size), "(%d) port_stats.opackets (%d) not as expected (%d).\n", - test_params->bonded_port_id, (int)port_stats.opackets, + test_params->bonding_port_id, (int)port_stats.opackets, burst_size + burst_size + burst_size); - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)(burst_size + burst_size), "(%d) port_stats.opackets (%d) not as expected (%d).", - test_params->slave_port_ids[0], (int)port_stats.opackets, + test_params->member_port_ids[0], (int)port_stats.opackets, burst_size + burst_size); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[0], 0); + test_params->member_port_ids[0], 0); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[1], 1); + test_params->member_port_ids[1], 1); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[2], 1); + test_params->member_port_ids[2], 1); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[3], 1); + test_params->member_port_ids[3], 1); - for (i = 0; i < TEST_BALANCE_LINK_STATUS_SLAVE_COUNT; i++) { + for (i = 0; i < TEST_BALANCE_LINK_STATUS_MEMBER_COUNT; i++) { TEST_ASSERT_EQUAL(generate_test_burst( &pkt_burst[i][0], burst_size, 0, 1, 0, 0, 0), burst_size, "Failed to generate packet burst"); - virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[i], + virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[i], &pkt_burst[i][0], burst_size); } - /* Verify that pkts are not received on slaves with link status down */ + /* Verify that pkts are not received on members with link status down */ - rte_eth_rx_burst(test_params->bonded_port_id, 0, rx_pkt_burst, + rte_eth_rx_burst(test_params->bonding_port_id, 0, rx_pkt_burst, MAX_PKT_BURST); - /* Verify bonded device rx count */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding device rx count */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)(burst_size * 3), "(%d) port_stats.ipackets (%d) not as expected (%d)\n", - test_params->bonded_port_id, (int)port_stats.ipackets, + test_params->bonding_port_id, (int)port_stats.ipackets, burst_size * 3); - /* Clean up and remove slaves from bonded device */ - return 
remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -3576,9 +3611,9 @@ test_broadcast_tx_burst(void) struct rte_eth_stats port_stats; - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_BROADCAST, 0, 2, 1), - "Failed to initialise bonded device"); + "Failed to initialise bonding device"); initialize_eth_header(test_params->pkt_eth_hdr, (struct rte_ether_addr *)src_mac, @@ -3590,7 +3625,7 @@ test_broadcast_tx_burst(void) pktlen = initialize_ipv4_header(test_params->pkt_ipv4_hdr, src_addr, dst_addr_0, pktlen); - burst_size = 20 * test_params->bonded_slave_count; + burst_size = 20 * test_params->bonding_member_count; TEST_ASSERT(burst_size < MAX_PKT_BURST, "Burst size specified is greater than supported."); @@ -3601,242 +3636,244 @@ test_broadcast_tx_burst(void) 1, test_params->pkt_udp_hdr, burst_size, PACKET_BURST_GEN_PKT_LEN, 1), burst_size, "Failed to generate packet burst"); - /* Send burst on bonded port */ - TEST_ASSERT_EQUAL(rte_eth_tx_burst(test_params->bonded_port_id, 0, + /* Send burst on bonding port */ + TEST_ASSERT_EQUAL(rte_eth_tx_burst(test_params->bonding_port_id, 0, pkts_burst, burst_size), burst_size, - "Bonded Port (%d) rx burst failed, packets transmitted value " + "Bonding Port (%d) rx burst failed, packets transmitted value " "not as expected (%d)", - test_params->bonded_port_id, burst_size); + test_params->bonding_port_id, burst_size); - /* Verify bonded port tx stats */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding port tx stats */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, - (uint64_t)burst_size * test_params->bonded_slave_count, - "Bonded Port (%d) opackets value (%u) not as expected (%d)", - test_params->bonded_port_id, (unsigned int)port_stats.opackets, + (uint64_t)burst_size * test_params->bonding_member_count, + "Bonding Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonding_port_id, (unsigned int)port_stats.opackets, burst_size); - /* Verify slave ports tx stats */ - for (i = 0; i < test_params->bonded_slave_count; i++) { - rte_eth_stats_get(test_params->slave_port_ids[i], &port_stats); + /* Verify member ports tx stats */ + for (i = 0; i < test_params->bonding_member_count; i++) { + rte_eth_stats_get(test_params->member_port_ids[i], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)burst_size, - "Slave Port (%d) opackets value (%u) not as expected (%d)\n", - test_params->bonded_port_id, + "Member Port (%d) opackets value (%u) not as expected (%d)\n", + test_params->bonding_port_id, (unsigned int)port_stats.opackets, burst_size); } - /* Put all slaves down and try and transmit */ - for (i = 0; i < test_params->bonded_slave_count; i++) { + /* Put all members down and try and transmit */ + for (i = 0; i < test_params->bonding_member_count; i++) { virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[i], 0); + test_params->member_port_ids[i], 0); } - /* Send burst on bonded port */ + /* Send burst on bonding port */ TEST_ASSERT_EQUAL(rte_eth_tx_burst( - test_params->bonded_port_id, 0, pkts_burst, burst_size), 0, + test_params->bonding_port_id, 0, pkts_burst, burst_size), 0, "transmitted an unexpected number of packets"); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + 
/* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } -#define TEST_BCAST_SLAVE_TX_FAIL_SLAVE_COUNT (3) -#define TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE (40) -#define TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT (15) -#define TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT (10) +#define TEST_BCAST_MEMBER_TX_FAIL_MEMBER_COUNT (3) +#define TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE (40) +#define TEST_BCAST_MEMBER_TX_FAIL_MAX_PACKETS_COUNT (15) +#define TEST_BCAST_MEMBER_TX_FAIL_MIN_PACKETS_COUNT (10) static int -test_broadcast_tx_burst_slave_tx_fail(void) +test_broadcast_tx_burst_member_tx_fail(void) { - struct rte_mbuf *pkts_burst[TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE]; - struct rte_mbuf *expected_fail_pkts[TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT]; + struct rte_mbuf *pkts_burst[TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE]; + struct rte_mbuf *expected_fail_pkts[TEST_BCAST_MEMBER_TX_FAIL_MIN_PACKETS_COUNT]; struct rte_eth_stats port_stats; int i, tx_count; - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_BROADCAST, 0, - TEST_BCAST_SLAVE_TX_FAIL_SLAVE_COUNT, 1), - "Failed to initialise bonded device"); + TEST_BCAST_MEMBER_TX_FAIL_MEMBER_COUNT, 1), + "Failed to initialise bonding device"); /* Generate test bursts for transmission */ TEST_ASSERT_EQUAL(generate_test_burst(pkts_burst, - TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE, 0, 0, 0, 0, 0), - TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE, + TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE, 0, 0, 0, 0, 0), + TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE, "Failed to generate test packet burst"); - for (i = 0; i < TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT; i++) { - expected_fail_pkts[i] = pkts_burst[TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - - TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT + i]; + for (i = 0; i < TEST_BCAST_MEMBER_TX_FAIL_MIN_PACKETS_COUNT; i++) { + expected_fail_pkts[i] = pkts_burst[TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE - + TEST_BCAST_MEMBER_TX_FAIL_MIN_PACKETS_COUNT + i]; } - /* Set virtual slave TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX to only fail - * transmission of TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT packets of burst */ + /* + * Set virtual member TEST_BAL_MEMBER_TX_FAIL_FAILING_MEMBER_IDX to only fail + * transmission of TEST_BAL_MEMBER_TX_FAIL_PACKETS_COUNT packets of burst. 
+ */ virtual_ethdev_tx_burst_fn_set_success( - test_params->slave_port_ids[0], + test_params->member_port_ids[0], 0); virtual_ethdev_tx_burst_fn_set_success( - test_params->slave_port_ids[1], + test_params->member_port_ids[1], 0); virtual_ethdev_tx_burst_fn_set_success( - test_params->slave_port_ids[2], + test_params->member_port_ids[2], 0); virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count( - test_params->slave_port_ids[0], - TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT); + test_params->member_port_ids[0], + TEST_BCAST_MEMBER_TX_FAIL_MAX_PACKETS_COUNT); virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count( - test_params->slave_port_ids[1], - TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT); + test_params->member_port_ids[1], + TEST_BCAST_MEMBER_TX_FAIL_MIN_PACKETS_COUNT); virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count( - test_params->slave_port_ids[2], - TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT); + test_params->member_port_ids[2], + TEST_BCAST_MEMBER_TX_FAIL_MAX_PACKETS_COUNT); /* Transmit burst */ - tx_count = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst, - TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE); + tx_count = rte_eth_tx_burst(test_params->bonding_port_id, 0, pkts_burst, + TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE); - TEST_ASSERT_EQUAL(tx_count, TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - - TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT, + TEST_ASSERT_EQUAL(tx_count, TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE - + TEST_BCAST_MEMBER_TX_FAIL_MIN_PACKETS_COUNT, "Transmitted (%d) packets, expected to transmit (%d) packets", - tx_count, TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - - TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT); + tx_count, TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE - + TEST_BCAST_MEMBER_TX_FAIL_MIN_PACKETS_COUNT); /* Verify that failed packet are expected failed packets */ - for (i = 0; i < TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT; i++) { + for (i = 0; i < TEST_BCAST_MEMBER_TX_FAIL_MIN_PACKETS_COUNT; i++) { TEST_ASSERT_EQUAL(expected_fail_pkts[i], pkts_burst[i + tx_count], "expected mbuf (%d) pointer %p not expected pointer %p", i, expected_fail_pkts[i], pkts_burst[i + tx_count]); } - /* Verify slave ports tx stats */ + /* Verify member ports tx stats */ - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, - (uint64_t)TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - - TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT, + (uint64_t)TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE - + TEST_BCAST_MEMBER_TX_FAIL_MAX_PACKETS_COUNT, "Port (%d) opackets value (%u) not as expected (%d)", - test_params->bonded_port_id, (unsigned int)port_stats.opackets, - TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - - TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT); + test_params->bonding_port_id, (unsigned int)port_stats.opackets, + TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE - + TEST_BCAST_MEMBER_TX_FAIL_MAX_PACKETS_COUNT); - rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[1], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, - (uint64_t)TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - - TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT, + (uint64_t)TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE - + TEST_BCAST_MEMBER_TX_FAIL_MIN_PACKETS_COUNT, "Port (%d) opackets value (%u) not as expected (%d)", - test_params->bonded_port_id, (unsigned int)port_stats.opackets, - TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - - TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT); + test_params->bonding_port_id, (unsigned int)port_stats.opackets, + 
+ TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE -
+ TEST_BCAST_MEMBER_TX_FAIL_MIN_PACKETS_COUNT);
- rte_eth_stats_get(test_params->slave_port_ids[2], &port_stats);
+ rte_eth_stats_get(test_params->member_port_ids[2], &port_stats);
TEST_ASSERT_EQUAL(port_stats.opackets,
- (uint64_t)TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE -
- TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT,
+ (uint64_t)TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE -
+ TEST_BCAST_MEMBER_TX_FAIL_MAX_PACKETS_COUNT,
"Port (%d) opackets value (%u) not as expected (%d)",
- test_params->bonded_port_id, (unsigned int)port_stats.opackets,
- TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE -
- TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT);
+ test_params->bonding_port_id, (unsigned int)port_stats.opackets,
+ TEST_BCAST_MEMBER_TX_FAIL_BURST_SIZE -
+ TEST_BCAST_MEMBER_TX_FAIL_MAX_PACKETS_COUNT);
/* Verify that all mbufs who transmission failed have a ref value of one */
TEST_ASSERT_SUCCESS(verify_mbufs_ref_count(&pkts_burst[tx_count],
- TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT, 1),
+ TEST_BCAST_MEMBER_TX_FAIL_MIN_PACKETS_COUNT, 1),
"mbufs refcnts not as expected");
free_mbufs(&pkts_burst[tx_count],
- TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT);
+ TEST_BCAST_MEMBER_TX_FAIL_MIN_PACKETS_COUNT);
- /* Clean up and remove slaves from bonded device */
- return remove_slaves_and_stop_bonded_device();
+ /* Clean up and remove members from bonding device */
+ return remove_members_and_stop_bonding_device();
}
-#define BROADCAST_RX_BURST_NUM_OF_SLAVES (3)
+#define BROADCAST_RX_BURST_NUM_OF_MEMBERS (3)
static int
test_broadcast_rx_burst(void)
{
- struct rte_mbuf *gen_pkt_burst[BROADCAST_RX_BURST_NUM_OF_SLAVES][MAX_PKT_BURST];
+ struct rte_mbuf *gen_pkt_burst[BROADCAST_RX_BURST_NUM_OF_MEMBERS][MAX_PKT_BURST];
struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL };
struct rte_eth_stats port_stats;
- int burst_size[BROADCAST_RX_BURST_NUM_OF_SLAVES] = { 10, 5, 30 };
+ int burst_size[BROADCAST_RX_BURST_NUM_OF_MEMBERS] = { 10, 5, 30 };
int i, j;
memset(gen_pkt_burst, 0, sizeof(gen_pkt_burst));
- /* Initialize bonded device with 4 slaves in round robin mode */
- TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
+ /* Initialize bonding device with 3 members in broadcast mode */
+ TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members(
BONDING_MODE_BROADCAST, 0, 3, 1),
- "Failed to initialise bonded device");
+ "Failed to initialise bonding device");
/* Generate test bursts of packets to transmit */
- for (i = 0; i < BROADCAST_RX_BURST_NUM_OF_SLAVES; i++) {
+ for (i = 0; i < BROADCAST_RX_BURST_NUM_OF_MEMBERS; i++) {
TEST_ASSERT_EQUAL(generate_test_burst(
&gen_pkt_burst[i][0], burst_size[i], 0, 0, 1, 0, 0),
burst_size[i], "failed to generate packet burst");
}
- /* Add rx data to slave 0 */
- for (i = 0; i < BROADCAST_RX_BURST_NUM_OF_SLAVES; i++) {
- virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[i],
+ /* Add rx data to each member */
+ for (i = 0; i < BROADCAST_RX_BURST_NUM_OF_MEMBERS; i++) {
+ virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[i],
&gen_pkt_burst[i][0], burst_size[i]);
}
- /* Call rx burst on bonded device */
- /* Send burst on bonded port */
+ /* Call rx burst on bonding device */
+ /* Send burst on bonding port */
TEST_ASSERT_EQUAL(rte_eth_rx_burst(
- test_params->bonded_port_id, 0, rx_pkt_burst, MAX_PKT_BURST),
+ test_params->bonding_port_id, 0, rx_pkt_burst, MAX_PKT_BURST),
burst_size[0] + burst_size[1] + burst_size[2],
"rx burst failed");
- /* Verify bonded device rx count */
- rte_eth_stats_get(test_params->bonded_port_id, &port_stats);
+ /* Verify bonding device rx count */
+ rte_eth_stats_get(test_params->bonding_port_id, &port_stats);
TEST_ASSERT_EQUAL(port_stats.ipackets,
(uint64_t)(burst_size[0] + burst_size[1] + burst_size[2]),
- "Bonded Port (%d) ipackets value (%u) not as expected (%d)",
- test_params->bonded_port_id, (unsigned int)port_stats.ipackets,
+ "Bonding Port (%d) ipackets value (%u) not as expected (%d)",
+ test_params->bonding_port_id, (unsigned int)port_stats.ipackets,
burst_size[0] + burst_size[1] + burst_size[2]);
- /* Verify bonded slave devices rx counts */
- rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats);
+ /* Verify bonding member devices rx counts */
+ rte_eth_stats_get(test_params->member_port_ids[0], &port_stats);
TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size[0],
- "Slave Port (%d) ipackets value (%u) not as expected (%d)",
- test_params->slave_port_ids[0], (unsigned int)port_stats.ipackets,
+ "Member Port (%d) ipackets value (%u) not as expected (%d)",
+ test_params->member_port_ids[0], (unsigned int)port_stats.ipackets,
burst_size[0]);
- rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats);
+ rte_eth_stats_get(test_params->member_port_ids[1], &port_stats);
TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size[1],
- "Slave Port (%d) ipackets value (%u) not as expected (%d)",
- test_params->slave_port_ids[0], (unsigned int)port_stats.ipackets,
+ "Member Port (%d) ipackets value (%u) not as expected (%d)",
+ test_params->member_port_ids[0], (unsigned int)port_stats.ipackets,
burst_size[1]);
- rte_eth_stats_get(test_params->slave_port_ids[2], &port_stats);
+ rte_eth_stats_get(test_params->member_port_ids[2], &port_stats);
TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size[2],
- "Slave Port (%d) ipackets value (%u) not as expected (%d)",
- test_params->slave_port_ids[2], (unsigned int)port_stats.ipackets,
+ "Member Port (%d) ipackets value (%u) not as expected (%d)",
+ test_params->member_port_ids[2], (unsigned int)port_stats.ipackets,
burst_size[2]);
- rte_eth_stats_get(test_params->slave_port_ids[3], &port_stats);
+ rte_eth_stats_get(test_params->member_port_ids[3], &port_stats);
TEST_ASSERT_EQUAL(port_stats.ipackets, 0,
- "Slave Port (%d) ipackets value (%u) not as expected (%d)",
- test_params->slave_port_ids[3], (unsigned int)port_stats.ipackets,
+ "Member Port (%d) ipackets value (%u) not as expected (%d)",
+ test_params->member_port_ids[3], (unsigned int)port_stats.ipackets,
0);
/* free mbufs allocate for rx testing */
- for (i = 0; i < BROADCAST_RX_BURST_NUM_OF_SLAVES; i++) {
+ for (i = 0; i < BROADCAST_RX_BURST_NUM_OF_MEMBERS; i++) {
for (j = 0; j < MAX_PKT_BURST; j++) {
if (gen_pkt_burst[i][j] != NULL) {
rte_pktmbuf_free(gen_pkt_burst[i][j]);
@@ -3845,8 +3882,8 @@ test_broadcast_rx_burst(void)
}
}
- /* Clean up and remove slaves from bonded device */
- return remove_slaves_and_stop_bonded_device();
+ /* Clean up and remove members from bonding device */
+ return remove_members_and_stop_bonding_device();
}
static int
@@ -3855,46 +3892,46 @@ test_broadcast_verify_promiscuous_enable_disable(void)
int i;
int ret;
- /* Initialize bonded device with 4 slaves in round robin mode */
- TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
+ /* Initialize bonding device with 4 members in broadcast mode */
+ TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members(
BONDING_MODE_BROADCAST, 0, 4, 1),
- "Failed to initialise bonded device");
+ "Failed to initialise bonding device");
- ret =
rte_eth_promiscuous_enable(test_params->bonded_port_id); + ret = rte_eth_promiscuous_enable(test_params->bonding_port_id); TEST_ASSERT_SUCCESS(ret, "Failed to enable promiscuous mode for port %d: %s", - test_params->bonded_port_id, rte_strerror(-ret)); + test_params->bonding_port_id, rte_strerror(-ret)); - TEST_ASSERT_EQUAL(rte_eth_promiscuous_get(test_params->bonded_port_id), 1, + TEST_ASSERT_EQUAL(rte_eth_promiscuous_get(test_params->bonding_port_id), 1, "Port (%d) promiscuous mode not enabled", - test_params->bonded_port_id); + test_params->bonding_port_id); - for (i = 0; i < test_params->bonded_slave_count; i++) { + for (i = 0; i < test_params->bonding_member_count; i++) { TEST_ASSERT_EQUAL(rte_eth_promiscuous_get( - test_params->slave_port_ids[i]), 1, + test_params->member_port_ids[i]), 1, "Port (%d) promiscuous mode not enabled", - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); } - ret = rte_eth_promiscuous_disable(test_params->bonded_port_id); + ret = rte_eth_promiscuous_disable(test_params->bonding_port_id); TEST_ASSERT_SUCCESS(ret, "Failed to disable promiscuous mode for port %d: %s", - test_params->bonded_port_id, rte_strerror(-ret)); + test_params->bonding_port_id, rte_strerror(-ret)); - TEST_ASSERT_EQUAL(rte_eth_promiscuous_get(test_params->bonded_port_id), 0, + TEST_ASSERT_EQUAL(rte_eth_promiscuous_get(test_params->bonding_port_id), 0, "Port (%d) promiscuous mode not disabled", - test_params->bonded_port_id); + test_params->bonding_port_id); - for (i = 0; i < test_params->bonded_slave_count; i++) { + for (i = 0; i < test_params->bonding_member_count; i++) { TEST_ASSERT_EQUAL(rte_eth_promiscuous_get( - test_params->slave_port_ids[i]), 0, + test_params->member_port_ids[i]), 0, "Port (%d) promiscuous mode not disabled", - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); } - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -3905,238 +3942,246 @@ test_broadcast_verify_mac_assignment(void) int i; - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &expected_mac_addr_0), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], + &expected_mac_addr_0), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[2], &expected_mac_addr_1), + test_params->member_port_ids[0]); + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[2], + &expected_mac_addr_1), "Failed to get mac address (port %d)", - test_params->slave_port_ids[2]); + test_params->member_port_ids[2]); - /* Initialize bonded device with 4 slaves in round robin mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 4 members in round robin mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_BROADCAST, 0, 4, 1), - "Failed to initialise bonded device"); + "Failed to initialise bonding device"); - /* Verify that all MACs are the same as first slave added to bonded + /* Verify that all MACs are the same as first member added to bonding * device */ - for (i = 0; i < test_params->bonded_slave_count; i++) { - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[i], &read_mac_addr), + for (i = 0; i < test_params->bonding_member_count; i++) { + 
TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[i], + &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[i]); + "member port (%d) mac address not set to that of primary port", + test_params->member_port_ids[i]); } /* change primary and verify that MAC addresses haven't changed */ - TEST_ASSERT_SUCCESS(rte_eth_bond_primary_set(test_params->bonded_port_id, - test_params->slave_port_ids[2]), - "Failed to set bonded port (%d) primary port to (%d)", - test_params->bonded_port_id, test_params->slave_port_ids[i]); - - for (i = 0; i < test_params->bonded_slave_count; i++) { - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[i], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_bond_primary_set(test_params->bonding_port_id, + test_params->member_port_ids[2]), + "Failed to set bonding port (%d) primary port to (%d)", + test_params->bonding_port_id, test_params->member_port_ids[i]); + + for (i = 0; i < test_params->bonding_member_count; i++) { + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[i], + &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address has changed to that of primary " - "port without stop/start toggle of bonded device", - test_params->slave_port_ids[i]); + "member port (%d) mac address has changed to that of primary " + "port without stop/start toggle of bonding device", + test_params->member_port_ids[i]); } - /* stop / start bonded device and verify that primary MAC address is - * propagated to bonded device and slaves */ + /* + * stop / start bonding device and verify that primary MAC address is + * propagated to bonding device and members. 
+ */ - TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonded_port_id), - "Failed to stop bonded port %u", - test_params->bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonding_port_id), + "Failed to stop bonding port %u", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonded_port_id), - "Failed to start bonded device"); + TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonding_port_id), + "Failed to start bonding device"); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); + test_params->bonding_port_id); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_1, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of new primary port", - test_params->slave_port_ids[i]); + "bonding port (%d) mac address not set to that of new primary port", + test_params->member_port_ids[i]); - for (i = 0; i < test_params->bonded_slave_count; i++) { - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[i], &read_mac_addr), + for (i = 0; i < test_params->bonding_member_count; i++) { + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[i], + &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_1, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of new primary " - "port", test_params->slave_port_ids[i]); + "member port (%d) mac address not set to that of new primary " + "port", test_params->member_port_ids[i]); } /* Set explicit MAC address */ TEST_ASSERT_SUCCESS(rte_eth_bond_mac_address_set( - test_params->bonded_port_id, - (struct rte_ether_addr *)bonded_mac), + test_params->bonding_port_id, + (struct rte_ether_addr *)bonding_mac), "Failed to set MAC address"); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); - TEST_ASSERT_SUCCESS(memcmp(bonded_mac, &read_mac_addr, + test_params->bonding_port_id); + TEST_ASSERT_SUCCESS(memcmp(bonding_mac, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of new primary port", - test_params->slave_port_ids[i]); + "bonding port (%d) mac address not set to that of new primary port", + test_params->member_port_ids[i]); - for (i = 0; i < test_params->bonded_slave_count; i++) { - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[i], &read_mac_addr), + for (i = 0; i < test_params->bonding_member_count; i++) { + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[i], + &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[i]); - TEST_ASSERT_SUCCESS(memcmp(bonded_mac, &read_mac_addr, + test_params->member_port_ids[i]); + TEST_ASSERT_SUCCESS(memcmp(bonding_mac, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of new primary " - "port", test_params->slave_port_ids[i]); + "member port (%d) mac address not set to that of new primary " + "port", test_params->member_port_ids[i]); } - /* Clean up and remove slaves from bonded device */ - return 
remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } -#define BROADCAST_LINK_STATUS_NUM_OF_SLAVES (4) +#define BROADCAST_LINK_STATUS_NUM_OF_MEMBERS (4) static int -test_broadcast_verify_slave_link_status_change_behaviour(void) +test_broadcast_verify_member_link_status_change_behaviour(void) { - struct rte_mbuf *pkt_burst[BROADCAST_LINK_STATUS_NUM_OF_SLAVES][MAX_PKT_BURST]; + struct rte_mbuf *pkt_burst[BROADCAST_LINK_STATUS_NUM_OF_MEMBERS][MAX_PKT_BURST]; struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL }; struct rte_eth_stats port_stats; - uint16_t slaves[RTE_MAX_ETHPORTS]; + uint16_t members[RTE_MAX_ETHPORTS]; - int i, burst_size, slave_count; + int i, burst_size, member_count; memset(pkt_burst, 0, sizeof(pkt_burst)); - /* Initialize bonded device with 4 slaves in round robin mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( - BONDING_MODE_BROADCAST, 0, BROADCAST_LINK_STATUS_NUM_OF_SLAVES, - 1), "Failed to initialise bonded device"); + /* Initialize bonding device with 4 members in round robin mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( + BONDING_MODE_BROADCAST, 0, BROADCAST_LINK_STATUS_NUM_OF_MEMBERS, + 1), "Failed to initialise bonding device"); - /* Verify Current Slaves Count /Active Slave Count is */ - slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, slaves, + /* Verify Current Members Count /Active Member Count is */ + member_count = rte_eth_bond_members_get(test_params->bonding_port_id, members, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, 4, - "Number of slaves (%d) is not as expected (%d).", - slave_count, 4); + TEST_ASSERT_EQUAL(member_count, 4, + "Number of members (%d) is not as expected (%d).", + member_count, 4); - slave_count = rte_eth_bond_active_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, 4, - "Number of active slaves (%d) is not as expected (%d).", - slave_count, 4); + member_count = rte_eth_bond_active_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(member_count, 4, + "Number of active members (%d) is not as expected (%d).", + member_count, 4); - /* Set 2 slaves link status to down */ + /* Set 2 members link status to down */ virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[1], 0); + test_params->member_port_ids[1], 0); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[3], 0); + test_params->member_port_ids[3], 0); - slave_count = rte_eth_bond_active_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, 2, - "Number of active slaves (%d) is not as expected (%d).", - slave_count, 2); + member_count = rte_eth_bond_active_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(member_count, 2, + "Number of active members (%d) is not as expected (%d).", + member_count, 2); - for (i = 0; i < test_params->bonded_slave_count; i++) - rte_eth_stats_reset(test_params->slave_port_ids[i]); + for (i = 0; i < test_params->bonding_member_count; i++) + rte_eth_stats_reset(test_params->member_port_ids[i]); - /* Verify that pkts are not sent on slaves with link status down */ + /* Verify that pkts are not sent on members with link status down */ burst_size = 21; TEST_ASSERT_EQUAL(generate_test_burst( &pkt_burst[0][0], burst_size, 0, 0, 1, 0, 0), burst_size, "generate_test_burst 
failed"); - TEST_ASSERT_EQUAL(rte_eth_tx_burst(test_params->bonded_port_id, 0, + TEST_ASSERT_EQUAL(rte_eth_tx_burst(test_params->bonding_port_id, 0, &pkt_burst[0][0], burst_size), burst_size, "rte_eth_tx_burst failed\n"); - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); - TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)(burst_size * slave_count), + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); + TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)(burst_size * member_count), "(%d) port_stats.opackets (%d) not as expected (%d)\n", - test_params->bonded_port_id, (int)port_stats.opackets, - burst_size * slave_count); + test_params->bonding_port_id, (int)port_stats.opackets, + burst_size * member_count); - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)burst_size, "(%d) port_stats.opackets not as expected", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); - rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[1], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, 0, "(%d) port_stats.opackets not as expected", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); - rte_eth_stats_get(test_params->slave_port_ids[2], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[2], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)burst_size, "(%d) port_stats.opackets not as expected", - test_params->slave_port_ids[2]); + test_params->member_port_ids[2]); - rte_eth_stats_get(test_params->slave_port_ids[3], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[3], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, 0, "(%d) port_stats.opackets not as expected", - test_params->slave_port_ids[3]); + test_params->member_port_ids[3]); - for (i = 0; i < BROADCAST_LINK_STATUS_NUM_OF_SLAVES; i++) { + for (i = 0; i < BROADCAST_LINK_STATUS_NUM_OF_MEMBERS; i++) { TEST_ASSERT_EQUAL(generate_test_burst( &pkt_burst[i][0], burst_size, 0, 0, 1, 0, 0), burst_size, "failed to generate packet burst"); - virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[i], + virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[i], &pkt_burst[i][0], burst_size); } - /* Verify that pkts are not received on slaves with link status down */ + /* Verify that pkts are not received on members with link status down */ TEST_ASSERT_EQUAL(rte_eth_rx_burst( - test_params->bonded_port_id, 0, rx_pkt_burst, MAX_PKT_BURST), + test_params->bonding_port_id, 0, rx_pkt_burst, MAX_PKT_BURST), burst_size + burst_size, "rte_eth_rx_burst failed"); - /* Verify bonded device rx count */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding device rx count */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)(burst_size + burst_size), "(%d) port_stats.ipackets not as expected\n", - test_params->bonded_port_id); + test_params->bonding_port_id); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int -test_reconfigure_bonded_device(void) +test_reconfigure_bonding_device(void) { test_params->nb_rx_q = 4; test_params->nb_tx_q = 4; - TEST_ASSERT_SUCCESS(configure_ethdev(test_params->bonded_port_id, 0, 
0), - "failed to reconfigure bonded device"); + TEST_ASSERT_SUCCESS(configure_ethdev(test_params->bonding_port_id, 0, 0), + "failed to reconfigure bonding device"); test_params->nb_rx_q = 2; test_params->nb_tx_q = 2; - TEST_ASSERT_SUCCESS(configure_ethdev(test_params->bonded_port_id, 0, 0), - "failed to reconfigure bonded device with less rx/tx queues"); + TEST_ASSERT_SUCCESS(configure_ethdev(test_params->bonding_port_id, 0, 0), + "failed to reconfigure bonding device with less rx/tx queues"); return 0; } static int -test_close_bonded_device(void) +test_close_bonding_device(void) { - rte_eth_dev_close(test_params->bonded_port_id); + rte_eth_dev_close(test_params->bonding_port_id); return 0; } @@ -4146,21 +4191,21 @@ testsuite_teardown(void) free(test_params->pkt_eth_hdr); test_params->pkt_eth_hdr = NULL; - /* Clean up and remove slaves from bonded device */ - remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + remove_members_and_stop_bonding_device(); } static void free_virtualpmd_tx_queue(void) { - int i, slave_port, to_free_cnt; + int i, member_port, to_free_cnt; struct rte_mbuf *pkts_to_free[MAX_PKT_BURST]; /* Free tx queue of virtual pmd */ - for (slave_port = 0; slave_port < test_params->bonded_slave_count; - slave_port++) { + for (member_port = 0; member_port < test_params->bonding_member_count; + member_port++) { to_free_cnt = virtual_ethdev_get_mbufs_from_tx_queue( - test_params->slave_port_ids[slave_port], + test_params->member_port_ids[member_port], pkts_to_free, MAX_PKT_BURST); for (i = 0; i < to_free_cnt; i++) rte_pktmbuf_free(pkts_to_free[i]); @@ -4177,11 +4222,11 @@ test_tlb_tx_burst(void) uint64_t sum_ports_opackets = 0, all_bond_opackets = 0, all_bond_obytes = 0; uint16_t pktlen; - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members (BONDING_MODE_TLB, 1, 3, 1), - "Failed to initialise bonded device"); + "Failed to initialise bonding device"); - burst_size = 20 * test_params->bonded_slave_count; + burst_size = 20 * test_params->bonding_member_count; TEST_ASSERT(burst_size < MAX_PKT_BURST, "Burst size specified is greater than supported.\n"); @@ -4197,7 +4242,7 @@ test_tlb_tx_burst(void) RTE_ETHER_TYPE_IPV4, 0, 0); } else { initialize_eth_header(test_params->pkt_eth_hdr, - (struct rte_ether_addr *)test_params->default_slave_mac, + (struct rte_ether_addr *)test_params->default_member_mac, (struct rte_ether_addr *)dst_mac_0, RTE_ETHER_TYPE_IPV4, 0, 0); } @@ -4208,8 +4253,8 @@ test_tlb_tx_burst(void) generate_packet_burst(test_params->mbuf_pool, pkt_burst, test_params->pkt_eth_hdr, 0, test_params->pkt_ipv4_hdr, 1, test_params->pkt_udp_hdr, burst_size, 60, 1); - /* Send burst on bonded port */ - nb_tx = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkt_burst, + /* Send burst on bonding port */ + nb_tx = rte_eth_tx_burst(test_params->bonding_port_id, 0, pkt_burst, burst_size); nb_tx2 += nb_tx; @@ -4222,50 +4267,50 @@ test_tlb_tx_burst(void) } - /* Verify bonded port tx stats */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats[0]); + /* Verify bonding port tx stats */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats[0]); all_bond_opackets = port_stats[0].opackets; all_bond_obytes = port_stats[0].obytes; TEST_ASSERT_EQUAL(port_stats[0].opackets, (uint64_t)nb_tx2, - "Bonded Port (%d) opackets value (%u) not as expected (%d)\n", - test_params->bonded_port_id, (unsigned int)port_stats[0].opackets, + "Bonding Port (%d) opackets value (%u) not as 
expected (%d)\n", + test_params->bonding_port_id, (unsigned int)port_stats[0].opackets, burst_size); - /* Verify slave ports tx stats */ - for (i = 0; i < test_params->bonded_slave_count; i++) { - rte_eth_stats_get(test_params->slave_port_ids[i], &port_stats[i]); + /* Verify member ports tx stats */ + for (i = 0; i < test_params->bonding_member_count; i++) { + rte_eth_stats_get(test_params->member_port_ids[i], &port_stats[i]); sum_ports_opackets += port_stats[i].opackets; } TEST_ASSERT_EQUAL(sum_ports_opackets, (uint64_t)all_bond_opackets, - "Total packets sent by slaves is not equal to packets sent by bond interface"); + "Total packets sent by members is not equal to packets sent by bond interface"); - /* checking if distribution of packets is balanced over slaves */ - for (i = 0; i < test_params->bonded_slave_count; i++) { + /* checking if distribution of packets is balanced over members */ + for (i = 0; i < test_params->bonding_member_count; i++) { TEST_ASSERT(port_stats[i].obytes > 0 && port_stats[i].obytes < all_bond_obytes, - "Packets are not balanced over slaves"); + "Packets are not balanced over members"); } - /* Put all slaves down and try and transmit */ - for (i = 0; i < test_params->bonded_slave_count; i++) { + /* Put all members down and try and transmit */ + for (i = 0; i < test_params->bonding_member_count; i++) { virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[i], 0); + test_params->member_port_ids[i], 0); } - /* Send burst on bonded port */ - nb_tx = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkt_burst, + /* Send burst on bonding port */ + nb_tx = rte_eth_tx_burst(test_params->bonding_port_id, 0, pkt_burst, burst_size); TEST_ASSERT_EQUAL(nb_tx, 0, " bad number of packet in burst"); - /* Clean ugit checkout masterp and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } -#define TEST_ADAPTIVE_TRANSMIT_LOAD_BALANCING_RX_BURST_SLAVE_COUNT (4) +#define TEST_ADAPTIVE_TLB_RX_BURST_MEMBER_COUNT (4) static int test_tlb_rx_burst(void) @@ -4279,63 +4324,63 @@ test_tlb_rx_burst(void) uint16_t i, j, nb_rx, burst_size = 17; - /* Initialize bonded device with 4 slaves in transmit load balancing mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 4 members in transmit load balancing mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_TLB, - TEST_ADAPTIVE_TRANSMIT_LOAD_BALANCING_RX_BURST_SLAVE_COUNT, 1, 1), - "Failed to initialize bonded device"); + TEST_ADAPTIVE_TLB_RX_BURST_MEMBER_COUNT, 1, 1), + "Failed to initialize bonding device"); - primary_port = rte_eth_bond_primary_get(test_params->bonded_port_id); + primary_port = rte_eth_bond_primary_get(test_params->bonding_port_id); TEST_ASSERT(primary_port >= 0, - "failed to get primary slave for bonded port (%d)", - test_params->bonded_port_id); + "failed to get primary member for bonding port (%d)", + test_params->bonding_port_id); - for (i = 0; i < test_params->bonded_slave_count; i++) { + for (i = 0; i < test_params->bonding_member_count; i++) { /* Generate test bursts of packets to transmit */ TEST_ASSERT_EQUAL(generate_test_burst( &gen_pkt_burst[0], burst_size, 0, 1, 0, 0, 0), burst_size, "burst generation failed"); - /* Add rx data to slave */ - virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[i], + /* Add rx data to member */ + 
virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[i], &gen_pkt_burst[0], burst_size); - /* Call rx burst on bonded device */ - nb_rx = rte_eth_rx_burst(test_params->bonded_port_id, 0, + /* Call rx burst on bonding device */ + nb_rx = rte_eth_rx_burst(test_params->bonding_port_id, 0, &rx_pkt_burst[0], MAX_PKT_BURST); TEST_ASSERT_EQUAL(nb_rx, burst_size, "rte_eth_rx_burst failed\n"); - if (test_params->slave_port_ids[i] == primary_port) { - /* Verify bonded device rx count */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + if (test_params->member_port_ids[i] == primary_port) { + /* Verify bonding device rx count */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size, - "Bonded Port (%d) ipackets value (%u) not as expected (%d)\n", - test_params->bonded_port_id, + "Bonding Port (%d) ipackets value (%u) not as expected (%d)\n", + test_params->bonding_port_id, (unsigned int)port_stats.ipackets, burst_size); - /* Verify bonded slave devices rx count */ - for (j = 0; j < test_params->bonded_slave_count; j++) { - rte_eth_stats_get(test_params->slave_port_ids[j], &port_stats); + /* Verify bonding member devices rx count */ + for (j = 0; j < test_params->bonding_member_count; j++) { + rte_eth_stats_get(test_params->member_port_ids[j], &port_stats); if (i == j) { TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size, - "Slave Port (%d) ipackets value (%u) not as expected (%d)\n", - test_params->slave_port_ids[i], + "Member Port (%d) ipackets value (%u) not as expected (%d)\n", + test_params->member_port_ids[i], (unsigned int)port_stats.ipackets, burst_size); } else { TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)0, - "Slave Port (%d) ipackets value (%u) not as expected (%d)\n", - test_params->slave_port_ids[i], + "Member Port (%d) ipackets value (%u) not as expected (%d)\n", + test_params->member_port_ids[i], (unsigned int)port_stats.ipackets, 0); } } } else { - for (j = 0; j < test_params->bonded_slave_count; j++) { - rte_eth_stats_get(test_params->slave_port_ids[j], &port_stats); + for (j = 0; j < test_params->bonding_member_count; j++) { + rte_eth_stats_get(test_params->member_port_ids[j], &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)0, - "Slave Port (%d) ipackets value (%u) not as expected (%d)\n", - test_params->slave_port_ids[i], + "Member Port (%d) ipackets value (%u) not as expected (%d)\n", + test_params->member_port_ids[i], (unsigned int)port_stats.ipackets, 0); } } @@ -4344,12 +4389,12 @@ test_tlb_rx_burst(void) for (i = 0; i < burst_size; i++) rte_pktmbuf_free(rx_pkt_burst[i]); - /* reset bonded device stats */ - rte_eth_stats_reset(test_params->bonded_port_id); + /* reset bonding device stats */ + rte_eth_stats_reset(test_params->bonding_port_id); } - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -4358,60 +4403,60 @@ test_tlb_verify_promiscuous_enable_disable(void) int i, primary_port, promiscuous_en; int ret; - /* Initialize bonded device with 4 slaves in transmit load balancing mode */ - TEST_ASSERT_SUCCESS( initialize_bonded_device_with_slaves( + /* Initialize bonding device with 4 members in transmit load balancing mode */ + TEST_ASSERT_SUCCESS( initialize_bonding_device_with_members( BONDING_MODE_TLB, 0, 4, 1), - "Failed to initialize bonded device"); + "Failed to initialize 
bonding device"); - primary_port = rte_eth_bond_primary_get(test_params->bonded_port_id); + primary_port = rte_eth_bond_primary_get(test_params->bonding_port_id); TEST_ASSERT(primary_port >= 0, - "failed to get primary slave for bonded port (%d)", - test_params->bonded_port_id); + "failed to get primary member for bonding port (%d)", + test_params->bonding_port_id); - ret = rte_eth_promiscuous_enable(test_params->bonded_port_id); + ret = rte_eth_promiscuous_enable(test_params->bonding_port_id); TEST_ASSERT_SUCCESS(ret, "Failed to enable promiscuous mode for port %d: %s", - test_params->bonded_port_id, rte_strerror(-ret)); + test_params->bonding_port_id, rte_strerror(-ret)); - promiscuous_en = rte_eth_promiscuous_get(test_params->bonded_port_id); + promiscuous_en = rte_eth_promiscuous_get(test_params->bonding_port_id); TEST_ASSERT_EQUAL(promiscuous_en, (int)1, "Port (%d) promiscuous mode not enabled\n", - test_params->bonded_port_id); - for (i = 0; i < test_params->bonded_slave_count; i++) { + test_params->bonding_port_id); + for (i = 0; i < test_params->bonding_member_count; i++) { promiscuous_en = rte_eth_promiscuous_get( - test_params->slave_port_ids[i]); - if (primary_port == test_params->slave_port_ids[i]) { + test_params->member_port_ids[i]); + if (primary_port == test_params->member_port_ids[i]) { TEST_ASSERT_EQUAL(promiscuous_en, (int)1, "Port (%d) promiscuous mode not enabled\n", - test_params->bonded_port_id); + test_params->bonding_port_id); } else { TEST_ASSERT_EQUAL(promiscuous_en, (int)0, "Port (%d) promiscuous mode enabled\n", - test_params->bonded_port_id); + test_params->bonding_port_id); } } - ret = rte_eth_promiscuous_disable(test_params->bonded_port_id); + ret = rte_eth_promiscuous_disable(test_params->bonding_port_id); TEST_ASSERT_SUCCESS(ret, "Failed to disable promiscuous mode for port %d: %s\n", - test_params->bonded_port_id, rte_strerror(-ret)); + test_params->bonding_port_id, rte_strerror(-ret)); - promiscuous_en = rte_eth_promiscuous_get(test_params->bonded_port_id); + promiscuous_en = rte_eth_promiscuous_get(test_params->bonding_port_id); TEST_ASSERT_EQUAL(promiscuous_en, (int)0, "Port (%d) promiscuous mode not disabled\n", - test_params->bonded_port_id); + test_params->bonding_port_id); - for (i = 0; i < test_params->bonded_slave_count; i++) { + for (i = 0; i < test_params->bonding_member_count; i++) { promiscuous_en = rte_eth_promiscuous_get( - test_params->slave_port_ids[i]); + test_params->member_port_ids[i]); TEST_ASSERT_EQUAL(promiscuous_en, (int)0, - "slave port (%d) promiscuous mode not disabled\n", - test_params->slave_port_ids[i]); + "member port (%d) promiscuous mode not disabled\n", + test_params->member_port_ids[i]); } - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int @@ -4420,153 +4465,159 @@ test_tlb_verify_mac_assignment(void) struct rte_ether_addr read_mac_addr; struct rte_ether_addr expected_mac_addr_0, expected_mac_addr_1; - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &expected_mac_addr_0), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], + &expected_mac_addr_0), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &expected_mac_addr_1), + test_params->member_port_ids[0]); + 
TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], + &expected_mac_addr_1), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); - /* Initialize bonded device with 2 slaves in active backup mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 2 members in active backup mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_TLB, 0, 2, 1), - "Failed to initialize bonded device"); + "Failed to initialize bonding device"); - /* Verify that bonded MACs is that of first slave and that the other slave - * MAC hasn't been changed */ - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + /* + * Verify that bonding MACs is that of first member and that the other member + * MAC hasn't been changed. + */ + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); + test_params->bonding_port_id); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of primary port", - test_params->bonded_port_id); + "bonding port (%d) mac address not set to that of primary port", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[0]); + "member port (%d) mac address not set to that of primary port", + test_params->member_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_1, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not as expected", - test_params->slave_port_ids[1]); + "member port (%d) mac address not as expected", + test_params->member_port_ids[1]); /* change primary and verify that MAC addresses haven't changed */ - TEST_ASSERT_EQUAL(rte_eth_bond_primary_set(test_params->bonded_port_id, - test_params->slave_port_ids[1]), 0, - "Failed to set bonded port (%d) primary port to (%d)", - test_params->bonded_port_id, test_params->slave_port_ids[1]); + TEST_ASSERT_EQUAL(rte_eth_bond_primary_set(test_params->bonding_port_id, + test_params->member_port_ids[1]), 0, + "Failed to set bonding port (%d) primary port to (%d)", + test_params->bonding_port_id, test_params->member_port_ids[1]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); + test_params->bonding_port_id); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of primary port", - test_params->bonded_port_id); + "bonding port (%d) mac address not 
set to that of primary port", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[0]); + "member port (%d) mac address not set to that of primary port", + test_params->member_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_1, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not as expected", - test_params->slave_port_ids[1]); + "member port (%d) mac address not as expected", + test_params->member_port_ids[1]); - /* stop / start bonded device and verify that primary MAC address is - * propagated to bonded device and slaves */ + /* + * stop / start bonding device and verify that primary MAC address is + * propagated to bonding device and members. + */ - TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonded_port_id), - "Failed to stop bonded port %u", - test_params->bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params->bonding_port_id), + "Failed to stop bonding port %u", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonded_port_id), + TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params->bonding_port_id), "Failed to start device"); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); + test_params->bonding_port_id); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_1, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of primary port", - test_params->bonded_port_id); + "bonding port (%d) mac address not set to that of primary port", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not as expected", - test_params->slave_port_ids[0]); + "member port (%d) mac address not as expected", + test_params->member_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_1, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of primary port", - test_params->slave_port_ids[1]); + "member port (%d) mac address not set to that of primary port", 
+ test_params->member_port_ids[1]); /* Set explicit MAC address */ TEST_ASSERT_SUCCESS(rte_eth_bond_mac_address_set( - test_params->bonded_port_id, - (struct rte_ether_addr *)bonded_mac), + test_params->bonding_port_id, + (struct rte_ether_addr *)bonding_mac), "failed to set MAC address"); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonding_port_id, &read_mac_addr), "Failed to get mac address (port %d)", - test_params->bonded_port_id); - TEST_ASSERT_SUCCESS(memcmp(&bonded_mac, &read_mac_addr, + test_params->bonding_port_id); + TEST_ASSERT_SUCCESS(memcmp(&bonding_mac, &read_mac_addr, sizeof(read_mac_addr)), - "bonded port (%d) mac address not set to that of bonded port", - test_params->bonded_port_id); + "bonding port (%d) mac address not set to that of bonding port", + test_params->bonding_port_id); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[0], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[0], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); TEST_ASSERT_SUCCESS(memcmp(&expected_mac_addr_0, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not as expected", - test_params->slave_port_ids[0]); + "member port (%d) mac address not as expected", + test_params->member_port_ids[0]); - TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->slave_port_ids[1], &read_mac_addr), + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->member_port_ids[1], &read_mac_addr), "Failed to get mac address (port %d)", - test_params->slave_port_ids[1]); - TEST_ASSERT_SUCCESS(memcmp(&bonded_mac, &read_mac_addr, + test_params->member_port_ids[1]); + TEST_ASSERT_SUCCESS(memcmp(&bonding_mac, &read_mac_addr, sizeof(read_mac_addr)), - "slave port (%d) mac address not set to that of bonded port", - test_params->slave_port_ids[1]); + "member port (%d) mac address not set to that of bonding port", + test_params->member_port_ids[1]); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } static int -test_tlb_verify_slave_link_status_change_failover(void) +test_tlb_verify_member_link_status_change_failover(void) { - struct rte_mbuf *pkt_burst[TEST_ADAPTIVE_TRANSMIT_LOAD_BALANCING_RX_BURST_SLAVE_COUNT][MAX_PKT_BURST]; + struct rte_mbuf *pkt_burst[TEST_ADAPTIVE_TLB_RX_BURST_MEMBER_COUNT][MAX_PKT_BURST]; struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL }; struct rte_eth_stats port_stats; - uint16_t slaves[RTE_MAX_ETHPORTS]; + uint16_t members[RTE_MAX_ETHPORTS]; - int i, burst_size, slave_count, primary_port; + int i, burst_size, member_count, primary_port; burst_size = 21; @@ -4574,121 +4625,124 @@ test_tlb_verify_slave_link_status_change_failover(void) - /* Initialize bonded device with 4 slaves in round robin mode */ - TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + /* Initialize bonding device with 4 members in round robin mode */ + TEST_ASSERT_SUCCESS(initialize_bonding_device_with_members( BONDING_MODE_TLB, 0, - TEST_ADAPTIVE_TRANSMIT_LOAD_BALANCING_RX_BURST_SLAVE_COUNT, 1), - "Failed to initialize bonded device with slaves"); + TEST_ADAPTIVE_TLB_RX_BURST_MEMBER_COUNT, 1), + "Failed to initialize bonding device with members"); - /* Verify Current Slaves Count /Active Slave Count is */ - 
slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, slaves, + /* Verify Current Members Count /Active Member Count is */ + member_count = rte_eth_bond_members_get(test_params->bonding_port_id, members, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, 4, - "Number of slaves (%d) is not as expected (%d).\n", - slave_count, 4); - - slave_count = rte_eth_bond_active_slaves_get(test_params->bonded_port_id, - slaves, RTE_MAX_ETHPORTS); - TEST_ASSERT_EQUAL(slave_count, (int)4, - "Number of slaves (%d) is not as expected (%d).\n", - slave_count, 4); - - primary_port = rte_eth_bond_primary_get(test_params->bonded_port_id); - TEST_ASSERT_EQUAL(primary_port, test_params->slave_port_ids[0], + TEST_ASSERT_EQUAL(member_count, 4, + "Number of members (%d) is not as expected (%d).\n", + member_count, 4); + + member_count = rte_eth_bond_active_members_get(test_params->bonding_port_id, + members, RTE_MAX_ETHPORTS); + TEST_ASSERT_EQUAL(member_count, 4, + "Number of members (%d) is not as expected (%d).\n", + member_count, 4); + + primary_port = rte_eth_bond_primary_get(test_params->bonding_port_id); + TEST_ASSERT_EQUAL(primary_port, test_params->member_port_ids[0], "Primary port not as expected"); - /* Bring 2 slaves down and verify active slave count */ + /* Bring 2 members down and verify active member count */ virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[1], 0); + test_params->member_port_ids[1], 0); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[3], 0); + test_params->member_port_ids[3], 0); - TEST_ASSERT_EQUAL(rte_eth_bond_active_slaves_get( - test_params->bonded_port_id, slaves, RTE_MAX_ETHPORTS), 2, - "Number of active slaves (%d) is not as expected (%d).", - slave_count, 2); + TEST_ASSERT_EQUAL(rte_eth_bond_active_members_get( + test_params->bonding_port_id, members, RTE_MAX_ETHPORTS), 2, + "Number of active members (%d) is not as expected (%d).", + member_count, 2); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[1], 1); + test_params->member_port_ids[1], 1); virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[3], 1); + test_params->member_port_ids[3], 1); - /* Bring primary port down, verify that active slave count is 3 and primary - * has changed */ + /* + * Bring primary port down, verify that active member count is 3 and primary + * has changed. 
+ */ virtual_ethdev_simulate_link_status_interrupt( - test_params->slave_port_ids[0], 0); + test_params->member_port_ids[0], 0); - TEST_ASSERT_EQUAL(rte_eth_bond_active_slaves_get( - test_params->bonded_port_id, slaves, RTE_MAX_ETHPORTS), 3, - "Number of active slaves (%d) is not as expected (%d).", - slave_count, 3); + TEST_ASSERT_EQUAL(rte_eth_bond_active_members_get( + test_params->bonding_port_id, members, RTE_MAX_ETHPORTS), 3, + "Number of active members (%d) is not as expected (%d).", + member_count, 3); - primary_port = rte_eth_bond_primary_get(test_params->bonded_port_id); - TEST_ASSERT_EQUAL(primary_port, test_params->slave_port_ids[2], + primary_port = rte_eth_bond_primary_get(test_params->bonding_port_id); + TEST_ASSERT_EQUAL(primary_port, test_params->member_port_ids[2], "Primary port not as expected"); rte_delay_us(500000); - /* Verify that pkts are sent on new primary slave */ + /* Verify that pkts are sent on new primary member */ for (i = 0; i < 4; i++) { TEST_ASSERT_EQUAL(generate_test_burst( &pkt_burst[0][0], burst_size, 0, 1, 0, 0, 0), burst_size, "generate_test_burst failed\n"); TEST_ASSERT_EQUAL(rte_eth_tx_burst( - test_params->bonded_port_id, 0, &pkt_burst[0][0], burst_size), burst_size, + test_params->bonding_port_id, 0, &pkt_burst[0][0], burst_size), + burst_size, "rte_eth_tx_burst failed\n"); rte_delay_us(11000); } - rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (int8_t)0, "(%d) port_stats.opackets not as expected\n", - test_params->slave_port_ids[0]); + test_params->member_port_ids[0]); - rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[1], &port_stats); TEST_ASSERT_NOT_EQUAL(port_stats.opackets, (int8_t)0, "(%d) port_stats.opackets not as expected\n", - test_params->slave_port_ids[1]); + test_params->member_port_ids[1]); - rte_eth_stats_get(test_params->slave_port_ids[2], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[2], &port_stats); TEST_ASSERT_NOT_EQUAL(port_stats.opackets, (int8_t)0, "(%d) port_stats.opackets not as expected\n", - test_params->slave_port_ids[2]); + test_params->member_port_ids[2]); - rte_eth_stats_get(test_params->slave_port_ids[3], &port_stats); + rte_eth_stats_get(test_params->member_port_ids[3], &port_stats); TEST_ASSERT_NOT_EQUAL(port_stats.opackets, (int8_t)0, "(%d) port_stats.opackets not as expected\n", - test_params->slave_port_ids[3]); + test_params->member_port_ids[3]); /* Generate packet burst for testing */ - for (i = 0; i < TEST_ADAPTIVE_TRANSMIT_LOAD_BALANCING_RX_BURST_SLAVE_COUNT; i++) { + for (i = 0; i < TEST_ADAPTIVE_TLB_RX_BURST_MEMBER_COUNT; i++) { if (generate_test_burst(&pkt_burst[i][0], burst_size, 0, 1, 0, 0, 0) != burst_size) return -1; virtual_ethdev_add_mbufs_to_rx_queue( - test_params->slave_port_ids[i], &pkt_burst[i][0], burst_size); + test_params->member_port_ids[i], &pkt_burst[i][0], burst_size); } - if (rte_eth_rx_burst(test_params->bonded_port_id, 0, rx_pkt_burst, + if (rte_eth_rx_burst(test_params->bonding_port_id, 0, rx_pkt_burst, MAX_PKT_BURST) != burst_size) { printf("rte_eth_rx_burst\n"); return -1; } - /* Verify bonded device rx count */ - rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + /* Verify bonding device rx count */ + rte_eth_stats_get(test_params->bonding_port_id, &port_stats); TEST_ASSERT_EQUAL(port_stats.ipackets, (uint64_t)burst_size, "(%d) port_stats.ipackets not as 
expected\n", - test_params->bonded_port_id); + test_params->bonding_port_id); - /* Clean up and remove slaves from bonded device */ - return remove_slaves_and_stop_bonded_device(); + /* Clean up and remove members from bonding device */ + return remove_members_and_stop_bonding_device(); } -#define TEST_ALB_SLAVE_COUNT 2 +#define TEST_ALB_MEMBER_COUNT 2 static uint8_t mac_client1[] = {0x00, 0xAA, 0x55, 0xFF, 0xCC, 1}; static uint8_t mac_client2[] = {0x00, 0xAA, 0x55, 0xFF, 0xCC, 2}; @@ -4710,28 +4764,28 @@ test_alb_change_mac_in_reply_sent(void) struct rte_ether_hdr *eth_pkt; struct rte_arp_hdr *arp_pkt; - int slave_idx, nb_pkts, pkt_idx; + int member_idx, nb_pkts, pkt_idx; int retval = 0; struct rte_ether_addr bond_mac, client_mac; - struct rte_ether_addr *slave_mac1, *slave_mac2; + struct rte_ether_addr *member_mac1, *member_mac2; TEST_ASSERT_SUCCESS( - initialize_bonded_device_with_slaves(BONDING_MODE_ALB, - 0, TEST_ALB_SLAVE_COUNT, 1), - "Failed to initialize_bonded_device_with_slaves."); + initialize_bonding_device_with_members(BONDING_MODE_ALB, + 0, TEST_ALB_MEMBER_COUNT, 1), + "Failed to initialize_bonding_device_with_members."); /* Flush tx queue */ - rte_eth_tx_burst(test_params->bonded_port_id, 0, NULL, 0); - for (slave_idx = 0; slave_idx < test_params->bonded_slave_count; - slave_idx++) { + rte_eth_tx_burst(test_params->bonding_port_id, 0, NULL, 0); + for (member_idx = 0; member_idx < test_params->bonding_member_count; + member_idx++) { nb_pkts = virtual_ethdev_get_mbufs_from_tx_queue( - test_params->slave_port_ids[slave_idx], pkts_sent, + test_params->member_port_ids[member_idx], pkts_sent, MAX_PKT_BURST); } rte_ether_addr_copy( - rte_eth_devices[test_params->bonded_port_id].data->mac_addrs, + rte_eth_devices[test_params->bonding_port_id].data->mac_addrs, &bond_mac); /* @@ -4747,7 +4801,7 @@ test_alb_change_mac_in_reply_sent(void) sizeof(struct rte_ether_hdr)); initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client1, RTE_ARP_OP_REPLY); - rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1); + rte_eth_tx_burst(test_params->bonding_port_id, 0, &pkt, 1); pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); memcpy(client_mac.addr_bytes, mac_client2, RTE_ETHER_ADDR_LEN); @@ -4758,7 +4812,7 @@ test_alb_change_mac_in_reply_sent(void) sizeof(struct rte_ether_hdr)); initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client2, RTE_ARP_OP_REPLY); - rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1); + rte_eth_tx_burst(test_params->bonding_port_id, 0, &pkt, 1); pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); memcpy(client_mac.addr_bytes, mac_client3, RTE_ETHER_ADDR_LEN); @@ -4769,7 +4823,7 @@ test_alb_change_mac_in_reply_sent(void) sizeof(struct rte_ether_hdr)); initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client3, RTE_ARP_OP_REPLY); - rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1); + rte_eth_tx_burst(test_params->bonding_port_id, 0, &pkt, 1); pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); memcpy(client_mac.addr_bytes, mac_client4, RTE_ETHER_ADDR_LEN); @@ -4780,20 +4834,20 @@ test_alb_change_mac_in_reply_sent(void) sizeof(struct rte_ether_hdr)); initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client4, RTE_ARP_OP_REPLY); - rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1); + rte_eth_tx_burst(test_params->bonding_port_id, 0, &pkt, 1); - slave_mac1 = - rte_eth_devices[test_params->slave_port_ids[0]].data->mac_addrs; - slave_mac2 = - 
rte_eth_devices[test_params->slave_port_ids[1]].data->mac_addrs; + member_mac1 = + rte_eth_devices[test_params->member_port_ids[0]].data->mac_addrs; + member_mac2 = + rte_eth_devices[test_params->member_port_ids[1]].data->mac_addrs; /* * Checking if packets are properly distributed on bonding ports. Packets * 0 and 2 should be sent on port 0 and packets 1 and 3 on port 1. */ - for (slave_idx = 0; slave_idx < test_params->bonded_slave_count; slave_idx++) { + for (member_idx = 0; member_idx < test_params->bonding_member_count; member_idx++) { nb_pkts = virtual_ethdev_get_mbufs_from_tx_queue( - test_params->slave_port_ids[slave_idx], pkts_sent, + test_params->member_port_ids[member_idx], pkts_sent, MAX_PKT_BURST); for (pkt_idx = 0; pkt_idx < nb_pkts; pkt_idx++) { @@ -4802,14 +4856,14 @@ test_alb_change_mac_in_reply_sent(void) arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt + sizeof(struct rte_ether_hdr)); - if (slave_idx%2 == 0) { - if (!rte_is_same_ether_addr(slave_mac1, + if (member_idx%2 == 0) { + if (!rte_is_same_ether_addr(member_mac1, &arp_pkt->arp_data.arp_sha)) { retval = -1; goto test_end; } } else { - if (!rte_is_same_ether_addr(slave_mac2, + if (!rte_is_same_ether_addr(member_mac2, &arp_pkt->arp_data.arp_sha)) { retval = -1; goto test_end; @@ -4819,7 +4873,7 @@ test_alb_change_mac_in_reply_sent(void) } test_end: - retval += remove_slaves_and_stop_bonded_device(); + retval += remove_members_and_stop_bonding_device(); return retval; } @@ -4832,27 +4886,27 @@ test_alb_reply_from_client(void) struct rte_mbuf *pkt; struct rte_mbuf *pkts_sent[MAX_PKT_BURST]; - int slave_idx, nb_pkts, pkt_idx, nb_pkts_sum = 0; + int member_idx, nb_pkts, pkt_idx, nb_pkts_sum = 0; int retval = 0; struct rte_ether_addr bond_mac, client_mac; - struct rte_ether_addr *slave_mac1, *slave_mac2; + struct rte_ether_addr *member_mac1, *member_mac2; TEST_ASSERT_SUCCESS( - initialize_bonded_device_with_slaves(BONDING_MODE_ALB, - 0, TEST_ALB_SLAVE_COUNT, 1), - "Failed to initialize_bonded_device_with_slaves."); + initialize_bonding_device_with_members(BONDING_MODE_ALB, + 0, TEST_ALB_MEMBER_COUNT, 1), + "Failed to initialize_bonding_device_with_members."); /* Flush tx queue */ - rte_eth_tx_burst(test_params->bonded_port_id, 0, NULL, 0); - for (slave_idx = 0; slave_idx < test_params->bonded_slave_count; slave_idx++) { + rte_eth_tx_burst(test_params->bonding_port_id, 0, NULL, 0); + for (member_idx = 0; member_idx < test_params->bonding_member_count; member_idx++) { nb_pkts = virtual_ethdev_get_mbufs_from_tx_queue( - test_params->slave_port_ids[slave_idx], pkts_sent, + test_params->member_port_ids[member_idx], pkts_sent, MAX_PKT_BURST); } rte_ether_addr_copy( - rte_eth_devices[test_params->bonded_port_id].data->mac_addrs, + rte_eth_devices[test_params->bonding_port_id].data->mac_addrs, &bond_mac); /* @@ -4868,7 +4922,7 @@ test_alb_reply_from_client(void) sizeof(struct rte_ether_hdr)); initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client1, ip_host, RTE_ARP_OP_REPLY); - virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[0], &pkt, + virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[0], &pkt, 1); pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); @@ -4880,7 +4934,7 @@ test_alb_reply_from_client(void) sizeof(struct rte_ether_hdr)); initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client2, ip_host, RTE_ARP_OP_REPLY); - virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[0], &pkt, + virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[0], &pkt, 1); pkt = 
rte_pktmbuf_alloc(test_params->mbuf_pool); @@ -4892,7 +4946,7 @@ test_alb_reply_from_client(void) sizeof(struct rte_ether_hdr)); initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client3, ip_host, RTE_ARP_OP_REPLY); - virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[0], &pkt, + virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[0], &pkt, 1); pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); @@ -4904,25 +4958,25 @@ test_alb_reply_from_client(void) sizeof(struct rte_ether_hdr)); initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client4, ip_host, RTE_ARP_OP_REPLY); - virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[0], &pkt, + virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[0], &pkt, 1); /* * Issue rx_burst and tx_burst to force bonding driver to send update ARP * packets to every client in alb table. */ - rte_eth_rx_burst(test_params->bonded_port_id, 0, pkts_sent, MAX_PKT_BURST); - rte_eth_tx_burst(test_params->bonded_port_id, 0, NULL, 0); + rte_eth_rx_burst(test_params->bonding_port_id, 0, pkts_sent, MAX_PKT_BURST); + rte_eth_tx_burst(test_params->bonding_port_id, 0, NULL, 0); - slave_mac1 = rte_eth_devices[test_params->slave_port_ids[0]].data->mac_addrs; - slave_mac2 = rte_eth_devices[test_params->slave_port_ids[1]].data->mac_addrs; + member_mac1 = rte_eth_devices[test_params->member_port_ids[0]].data->mac_addrs; + member_mac2 = rte_eth_devices[test_params->member_port_ids[1]].data->mac_addrs; /* - * Checking if update ARP packets were properly send on slave ports. + * Checking if update ARP packets were properly send on member ports. */ - for (slave_idx = 0; slave_idx < test_params->bonded_slave_count; slave_idx++) { + for (member_idx = 0; member_idx < test_params->bonding_member_count; member_idx++) { nb_pkts = virtual_ethdev_get_mbufs_from_tx_queue( - test_params->slave_port_ids[slave_idx], pkts_sent, MAX_PKT_BURST); + test_params->member_port_ids[member_idx], pkts_sent, MAX_PKT_BURST); nb_pkts_sum += nb_pkts; for (pkt_idx = 0; pkt_idx < nb_pkts; pkt_idx++) { @@ -4931,14 +4985,14 @@ test_alb_reply_from_client(void) arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt + sizeof(struct rte_ether_hdr)); - if (slave_idx%2 == 0) { - if (!rte_is_same_ether_addr(slave_mac1, + if (member_idx%2 == 0) { + if (!rte_is_same_ether_addr(member_mac1, &arp_pkt->arp_data.arp_sha)) { retval = -1; goto test_end; } } else { - if (!rte_is_same_ether_addr(slave_mac2, + if (!rte_is_same_ether_addr(member_mac2, &arp_pkt->arp_data.arp_sha)) { retval = -1; goto test_end; @@ -4954,7 +5008,7 @@ test_alb_reply_from_client(void) } test_end: - retval += remove_slaves_and_stop_bonded_device(); + retval += remove_members_and_stop_bonding_device(); return retval; } @@ -4968,26 +5022,26 @@ test_alb_receive_vlan_reply(void) struct rte_mbuf *pkt; struct rte_mbuf *pkts_sent[MAX_PKT_BURST]; - int slave_idx, nb_pkts, pkt_idx; + int member_idx, nb_pkts, pkt_idx; int retval = 0; struct rte_ether_addr bond_mac, client_mac; TEST_ASSERT_SUCCESS( - initialize_bonded_device_with_slaves(BONDING_MODE_ALB, - 0, TEST_ALB_SLAVE_COUNT, 1), - "Failed to initialize_bonded_device_with_slaves."); + initialize_bonding_device_with_members(BONDING_MODE_ALB, + 0, TEST_ALB_MEMBER_COUNT, 1), + "Failed to initialize_bonding_device_with_members."); /* Flush tx queue */ - rte_eth_tx_burst(test_params->bonded_port_id, 0, NULL, 0); - for (slave_idx = 0; slave_idx < test_params->bonded_slave_count; slave_idx++) { + rte_eth_tx_burst(test_params->bonding_port_id, 0, NULL, 0); + for 
(member_idx = 0; member_idx < test_params->bonding_member_count; member_idx++) { nb_pkts = virtual_ethdev_get_mbufs_from_tx_queue( - test_params->slave_port_ids[slave_idx], pkts_sent, + test_params->member_port_ids[member_idx], pkts_sent, MAX_PKT_BURST); } rte_ether_addr_copy( - rte_eth_devices[test_params->bonded_port_id].data->mac_addrs, + rte_eth_devices[test_params->bonding_port_id].data->mac_addrs, &bond_mac); /* @@ -5007,18 +5061,18 @@ test_alb_receive_vlan_reply(void) arp_pkt = (struct rte_arp_hdr *)((char *)(vlan_pkt + 1)); initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client1, ip_host, RTE_ARP_OP_REPLY); - virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[0], &pkt, + virtual_ethdev_add_mbufs_to_rx_queue(test_params->member_port_ids[0], &pkt, 1); - rte_eth_rx_burst(test_params->bonded_port_id, 0, pkts_sent, MAX_PKT_BURST); - rte_eth_tx_burst(test_params->bonded_port_id, 0, NULL, 0); + rte_eth_rx_burst(test_params->bonding_port_id, 0, pkts_sent, MAX_PKT_BURST); + rte_eth_tx_burst(test_params->bonding_port_id, 0, NULL, 0); /* * Checking if VLAN headers in generated ARP Update packet are correct. */ - for (slave_idx = 0; slave_idx < test_params->bonded_slave_count; slave_idx++) { + for (member_idx = 0; member_idx < test_params->bonding_member_count; member_idx++) { nb_pkts = virtual_ethdev_get_mbufs_from_tx_queue( - test_params->slave_port_ids[slave_idx], pkts_sent, + test_params->member_port_ids[member_idx], pkts_sent, MAX_PKT_BURST); for (pkt_idx = 0; pkt_idx < nb_pkts; pkt_idx++) { @@ -5049,7 +5103,7 @@ test_alb_receive_vlan_reply(void) } test_end: - retval += remove_slaves_and_stop_bonded_device(); + retval += remove_members_and_stop_bonding_device(); return retval; } @@ -5062,9 +5116,9 @@ test_alb_ipv4_tx(void) retval = 0; TEST_ASSERT_SUCCESS( - initialize_bonded_device_with_slaves(BONDING_MODE_ALB, - 0, TEST_ALB_SLAVE_COUNT, 1), - "Failed to initialize_bonded_device_with_slaves."); + initialize_bonding_device_with_members(BONDING_MODE_ALB, + 0, TEST_ALB_MEMBER_COUNT, 1), + "Failed to initialize_bonding_device_with_members."); burst_size = 32; @@ -5078,14 +5132,14 @@ test_alb_ipv4_tx(void) * Checking if ipv4 traffic is transmitted via TLB policy. 
*/ pkts_send = rte_eth_tx_burst( - test_params->bonded_port_id, 0, pkt_burst, burst_size); + test_params->bonding_port_id, 0, pkt_burst, burst_size); if (pkts_send != burst_size) { retval = -1; goto test_end; } test_end: - retval += remove_slaves_and_stop_bonded_device(); + retval += remove_members_and_stop_bonding_device(); return retval; } @@ -5094,36 +5148,36 @@ static struct unit_test_suite link_bonding_test_suite = { .setup = test_setup, .teardown = testsuite_teardown, .unit_test_cases = { - TEST_CASE(test_create_bonded_device), - TEST_CASE(test_create_bonded_device_with_invalid_params), - TEST_CASE(test_add_slave_to_bonded_device), - TEST_CASE(test_add_slave_to_invalid_bonded_device), - TEST_CASE(test_remove_slave_from_bonded_device), - TEST_CASE(test_remove_slave_from_invalid_bonded_device), - TEST_CASE(test_get_slaves_from_bonded_device), - TEST_CASE(test_add_already_bonded_slave_to_bonded_device), - TEST_CASE(test_add_remove_multiple_slaves_to_from_bonded_device), - TEST_CASE(test_start_bonded_device), - TEST_CASE(test_stop_bonded_device), + TEST_CASE(test_create_bonding_device), + TEST_CASE(test_create_bonding_device_with_invalid_params), + TEST_CASE(test_add_member_to_bonding_device), + TEST_CASE(test_add_member_to_invalid_bonding_device), + TEST_CASE(test_remove_member_from_bonding_device), + TEST_CASE(test_remove_member_from_invalid_bonding_device), + TEST_CASE(test_get_members_from_bonding_device), + TEST_CASE(test_add_already_bonding_member_to_bonding_device), + TEST_CASE(test_add_remove_multiple_members_to_from_bonding_device), + TEST_CASE(test_start_bonding_device), + TEST_CASE(test_stop_bonding_device), TEST_CASE(test_set_bonding_mode), - TEST_CASE(test_set_primary_slave), - TEST_CASE(test_set_explicit_bonded_mac), - TEST_CASE(test_set_bonded_port_initialization_mac_assignment), + TEST_CASE(test_set_primary_member), + TEST_CASE(test_set_explicit_bonding_mac), + TEST_CASE(test_set_bonding_port_initialization_mac_assignment), TEST_CASE(test_status_interrupt), - TEST_CASE(test_adding_slave_after_bonded_device_started), + TEST_CASE(test_adding_member_after_bonding_device_started), TEST_CASE(test_roundrobin_tx_burst), - TEST_CASE(test_roundrobin_tx_burst_slave_tx_fail), - TEST_CASE(test_roundrobin_rx_burst_on_single_slave), - TEST_CASE(test_roundrobin_rx_burst_on_multiple_slaves), + TEST_CASE(test_roundrobin_tx_burst_member_tx_fail), + TEST_CASE(test_roundrobin_rx_burst_on_single_member), + TEST_CASE(test_roundrobin_rx_burst_on_multiple_members), TEST_CASE(test_roundrobin_verify_promiscuous_enable_disable), TEST_CASE(test_roundrobin_verify_mac_assignment), - TEST_CASE(test_roundrobin_verify_slave_link_status_change_behaviour), - TEST_CASE(test_roundrobin_verfiy_polling_slave_link_status_change), + TEST_CASE(test_roundrobin_verify_member_link_status_change_behaviour), + TEST_CASE(test_roundrobin_verify_polling_member_link_status_change), TEST_CASE(test_activebackup_tx_burst), TEST_CASE(test_activebackup_rx_burst), TEST_CASE(test_activebackup_verify_promiscuous_enable_disable), TEST_CASE(test_activebackup_verify_mac_assignment), - TEST_CASE(test_activebackup_verify_slave_link_status_change_failover), + TEST_CASE(test_activebackup_verify_member_link_status_change_failover), TEST_CASE(test_balance_xmit_policy_configuration), TEST_CASE(test_balance_l2_tx_burst), TEST_CASE(test_balance_l23_tx_burst_ipv4_toggle_ip_addr), @@ -5137,28 +5191,28 @@ static struct unit_test_suite link_bonding_test_suite = { TEST_CASE(test_balance_l34_tx_burst_ipv6_toggle_ip_addr), 
TEST_CASE(test_balance_l34_tx_burst_vlan_ipv6_toggle_ip_addr), TEST_CASE(test_balance_l34_tx_burst_ipv6_toggle_udp_port), - TEST_CASE(test_balance_tx_burst_slave_tx_fail), + TEST_CASE(test_balance_tx_burst_member_tx_fail), TEST_CASE(test_balance_rx_burst), TEST_CASE(test_balance_verify_promiscuous_enable_disable), TEST_CASE(test_balance_verify_mac_assignment), - TEST_CASE(test_balance_verify_slave_link_status_change_behaviour), + TEST_CASE(test_balance_verify_member_link_status_change_behaviour), TEST_CASE(test_tlb_tx_burst), TEST_CASE(test_tlb_rx_burst), TEST_CASE(test_tlb_verify_mac_assignment), TEST_CASE(test_tlb_verify_promiscuous_enable_disable), - TEST_CASE(test_tlb_verify_slave_link_status_change_failover), + TEST_CASE(test_tlb_verify_member_link_status_change_failover), TEST_CASE(test_alb_change_mac_in_reply_sent), TEST_CASE(test_alb_reply_from_client), TEST_CASE(test_alb_receive_vlan_reply), TEST_CASE(test_alb_ipv4_tx), TEST_CASE(test_broadcast_tx_burst), - TEST_CASE(test_broadcast_tx_burst_slave_tx_fail), + TEST_CASE(test_broadcast_tx_burst_member_tx_fail), TEST_CASE(test_broadcast_rx_burst), TEST_CASE(test_broadcast_verify_promiscuous_enable_disable), TEST_CASE(test_broadcast_verify_mac_assignment), - TEST_CASE(test_broadcast_verify_slave_link_status_change_behaviour), - TEST_CASE(test_reconfigure_bonded_device), - TEST_CASE(test_close_bonded_device), + TEST_CASE(test_broadcast_verify_member_link_status_change_behaviour), + TEST_CASE(test_reconfigure_bonding_device), + TEST_CASE(test_close_bonding_device), TEST_CASES_END() /**< NULL terminate unit test array */ } @@ -5171,4 +5225,4 @@ test_link_bonding(void) return unit_test_suite_runner(&link_bonding_test_suite); } -REGISTER_TEST_COMMAND(link_bonding_autotest, test_link_bonding); +REGISTER_DRIVER_TEST(link_bonding_autotest, test_link_bonding); diff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c index 21c512c94b8..645fc1e0d49 100644 --- a/app/test/test_link_bonding_mode4.c +++ b/app/test/test_link_bonding_mode4.c @@ -31,7 +31,7 @@ #include "test.h" -#define SLAVE_COUNT (4) +#define MEMBER_COUNT (4) #define RX_RING_SIZE 1024 #define TX_RING_SIZE 1024 @@ -44,17 +44,17 @@ #define MAX_PKT_BURST (32) #define DEF_PKT_BURST (16) -#define BONDED_DEV_NAME ("net_bonding_m4_bond_dev") +#define BONDING_DEV_NAME ("net_bonding_m4_bond_dev") -#define SLAVE_DEV_NAME_FMT ("net_virt_%d") -#define SLAVE_RX_QUEUE_FMT ("net_virt_%d_rx") -#define SLAVE_TX_QUEUE_FMT ("net_virt_%d_tx") +#define MEMBER_DEV_NAME_FMT ("net_virt_%d") +#define MEMBER_RX_QUEUE_FMT ("net_virt_%d_rx") +#define MEMBER_TX_QUEUE_FMT ("net_virt_%d_tx") #define INVALID_SOCKET_ID (-1) #define INVALID_PORT_ID (0xFF) #define INVALID_BONDING_MODE (-1) -static const struct rte_ether_addr slave_mac_default = { +static const struct rte_ether_addr member_mac_default = { { 0x00, 0xFF, 0x00, 0xFF, 0x00, 0x00 } }; @@ -70,11 +70,11 @@ static const struct rte_ether_addr slow_protocol_mac_addr = { { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 } }; -struct slave_conf { +struct member_conf { struct rte_ring *rx_queue; struct rte_ring *tx_queue; uint16_t port_id; - uint8_t bonded : 1; + uint8_t bonding : 1; uint8_t lacp_parnter_state; }; @@ -85,22 +85,22 @@ struct ether_vlan_hdr { }; struct link_bonding_unittest_params { - uint8_t bonded_port_id; - struct slave_conf slave_ports[SLAVE_COUNT]; + uint8_t bonding_port_id; + struct member_conf member_ports[MEMBER_COUNT]; struct rte_mempool *mbuf_pool; }; -#define TEST_DEFAULT_SLAVE_COUNT RTE_DIM(test_params.slave_ports) -#define 
TEST_RX_SLAVE_COUT TEST_DEFAULT_SLAVE_COUNT -#define TEST_TX_SLAVE_COUNT TEST_DEFAULT_SLAVE_COUNT -#define TEST_MARKER_SLAVE_COUT TEST_DEFAULT_SLAVE_COUNT -#define TEST_EXPIRED_SLAVE_COUNT TEST_DEFAULT_SLAVE_COUNT -#define TEST_PROMISC_SLAVE_COUNT TEST_DEFAULT_SLAVE_COUNT +#define TEST_DEFAULT_MEMBER_COUNT RTE_DIM(test_params.member_ports) +#define TEST_RX_MEMBER_COUT TEST_DEFAULT_MEMBER_COUNT +#define TEST_TX_MEMBER_COUNT TEST_DEFAULT_MEMBER_COUNT +#define TEST_MARKER_MEMBER_COUT TEST_DEFAULT_MEMBER_COUNT +#define TEST_EXPIRED_MEMBER_COUNT TEST_DEFAULT_MEMBER_COUNT +#define TEST_PROMISC_MEMBER_COUNT TEST_DEFAULT_MEMBER_COUNT static struct link_bonding_unittest_params test_params = { - .bonded_port_id = INVALID_PORT_ID, - .slave_ports = { [0 ... SLAVE_COUNT - 1] = { .port_id = INVALID_PORT_ID} }, + .bonding_port_id = INVALID_PORT_ID, + .member_ports = { [0 ... MEMBER_COUNT - 1] = { .port_id = INVALID_PORT_ID} }, .mbuf_pool = NULL, }; @@ -120,71 +120,71 @@ static uint8_t lacpdu_rx_count[RTE_MAX_ETHPORTS] = {0, }; #define FOR_EACH(_i, _item, _array, _size) \ for (_i = 0, _item = &_array[0]; _i < _size && (_item = &_array[_i]); _i++) -/* Macro for iterating over every port that can be used as a slave +/* Macro for iterating over every port that can be used as a member * in this test. - * _i variable used as an index in test_params->slave_ports - * _slave pointer to &test_params->slave_ports[_idx] + * _i variable used as an index in test_params->member_ports + * _member pointer to &test_params->member_ports[_idx] */ #define FOR_EACH_PORT(_i, _port) \ - FOR_EACH(_i, _port, test_params.slave_ports, \ - RTE_DIM(test_params.slave_ports)) + FOR_EACH(_i, _port, test_params.member_ports, \ + RTE_DIM(test_params.member_ports)) -/* Macro for iterating over every port that can be used as a slave +/* Macro for iterating over every port that can be used as a member * in this test and satisfy given condition. * - * _i variable used as an index in test_params->slave_ports - * _slave pointer to &test_params->slave_ports[_idx] + * _i variable used as an index in test_params->member_ports + * _member pointer to &test_params->member_ports[_idx] * _condition condition that need to be checked */ #define FOR_EACH_PORT_IF(_i, _port, _condition) FOR_EACH_PORT((_i), (_port)) \ if (!!(_condition)) -/* Macro for iterating over every port that is currently a slave of a bonded +/* Macro for iterating over every port that is currently a member of a bonding * device. - * _i variable used as an index in test_params->slave_ports - * _slave pointer to &test_params->slave_ports[_idx] + * _i variable used as an index in test_params->member_ports + * _member pointer to &test_params->member_ports[_idx] * */ -#define FOR_EACH_SLAVE(_i, _slave) \ - FOR_EACH_PORT_IF(_i, _slave, (_slave)->bonded != 0) +#define FOR_EACH_MEMBER(_i, _member) \ + FOR_EACH_PORT_IF(_i, _member, (_member)->bonding != 0) /* - * Returns packets from slaves TX queue. - * slave slave port + * Returns packets from members TX queue. + * member port * buffer for packets * size size of buffer * return number of packets or negative error number */ static int -slave_get_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size) +member_get_pkts(struct member_conf *member, struct rte_mbuf **buf, uint16_t size) { - return rte_ring_dequeue_burst(slave->tx_queue, (void **)buf, + return rte_ring_dequeue_burst(member->tx_queue, (void **)buf, size, NULL); } /* - * Injects given packets into slaves RX queue. 
- * slave slave port + * Injects given packets into members RX queue. + * member port * buffer for packets * size number of packets to be injected * return number of queued packets or negative error number */ static int -slave_put_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size) +member_put_pkts(struct member_conf *member, struct rte_mbuf **buf, uint16_t size) { - return rte_ring_enqueue_burst(slave->rx_queue, (void **)buf, + return rte_ring_enqueue_burst(member->rx_queue, (void **)buf, size, NULL); } static uint16_t bond_rx(struct rte_mbuf **buf, uint16_t size) { - return rte_eth_rx_burst(test_params.bonded_port_id, 0, buf, size); + return rte_eth_rx_burst(test_params.bonding_port_id, 0, buf, size); } static uint16_t bond_tx(struct rte_mbuf **buf, uint16_t size) { - return rte_eth_tx_burst(test_params.bonded_port_id, 0, buf, size); + return rte_eth_tx_burst(test_params.bonding_port_id, 0, buf, size); } static void @@ -219,79 +219,79 @@ configure_ethdev(uint16_t port_id, uint8_t start) } static int -add_slave(struct slave_conf *slave, uint8_t start) +add_member(struct member_conf *member, uint8_t start) { struct rte_ether_addr addr, addr_check; int retval; /* Some sanity check */ - RTE_VERIFY(test_params.slave_ports <= slave && - slave - test_params.slave_ports < (int)RTE_DIM(test_params.slave_ports)); - RTE_VERIFY(slave->bonded == 0); - RTE_VERIFY(slave->port_id != INVALID_PORT_ID); + RTE_VERIFY(test_params.member_ports <= member && + member - test_params.member_ports < (int)RTE_DIM(test_params.member_ports)); + RTE_VERIFY(member->bonding == 0); + RTE_VERIFY(member->port_id != INVALID_PORT_ID); - rte_ether_addr_copy(&slave_mac_default, &addr); - addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = slave->port_id; + rte_ether_addr_copy(&member_mac_default, &addr); + addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = member->port_id; - rte_eth_dev_mac_addr_remove(slave->port_id, &addr); + rte_eth_dev_mac_addr_remove(member->port_id, &addr); - TEST_ASSERT_SUCCESS(rte_eth_dev_mac_addr_add(slave->port_id, &addr, 0), - "Failed to set slave MAC address"); + TEST_ASSERT_SUCCESS(rte_eth_dev_mac_addr_add(member->port_id, &addr, 0), + "Failed to set member MAC address"); - TEST_ASSERT_SUCCESS(rte_eth_bond_slave_add(test_params.bonded_port_id, - slave->port_id), - "Failed to add slave (idx=%u, id=%u) to bonding (id=%u)", - (uint8_t)(slave - test_params.slave_ports), slave->port_id, - test_params.bonded_port_id); + TEST_ASSERT_SUCCESS(rte_eth_bond_member_add(test_params.bonding_port_id, + member->port_id), + "Failed to add member (idx=%u, id=%u) to bonding (id=%u)", + (uint8_t)(member - test_params.member_ports), member->port_id, + test_params.bonding_port_id); - slave->bonded = 1; + member->bonding = 1; if (start) { - TEST_ASSERT_SUCCESS(rte_eth_dev_start(slave->port_id), - "Failed to start slave %u", slave->port_id); + TEST_ASSERT_SUCCESS(rte_eth_dev_start(member->port_id), + "Failed to start member %u", member->port_id); } - retval = rte_eth_macaddr_get(slave->port_id, &addr_check); - TEST_ASSERT_SUCCESS(retval, "Failed to get slave mac address: %s", + retval = rte_eth_macaddr_get(member->port_id, &addr_check); + TEST_ASSERT_SUCCESS(retval, "Failed to get member mac address: %s", strerror(-retval)); TEST_ASSERT_EQUAL(rte_is_same_ether_addr(&addr, &addr_check), 1, - "Slave MAC address is not as expected"); + "Member MAC address is not as expected"); - RTE_VERIFY(slave->lacp_parnter_state == 0); + RTE_VERIFY(member->lacp_parnter_state == 0); return 0; } static int -remove_slave(struct slave_conf 
*slave) +remove_member(struct member_conf *member) { - ptrdiff_t slave_idx = slave - test_params.slave_ports; + ptrdiff_t member_idx = member - test_params.member_ports; - RTE_VERIFY(test_params.slave_ports <= slave && - slave_idx < (ptrdiff_t)RTE_DIM(test_params.slave_ports)); + RTE_VERIFY(test_params.member_ports <= member && + member_idx < (ptrdiff_t)RTE_DIM(test_params.member_ports)); - RTE_VERIFY(slave->bonded == 1); - RTE_VERIFY(slave->port_id != INVALID_PORT_ID); + RTE_VERIFY(member->bonding == 1); + RTE_VERIFY(member->port_id != INVALID_PORT_ID); - TEST_ASSERT_EQUAL(rte_ring_count(slave->rx_queue), 0, - "Slave %u tx queue not empty while removing from bonding.", - slave->port_id); + TEST_ASSERT_EQUAL(rte_ring_count(member->rx_queue), 0, + "Member %u tx queue not empty while removing from bonding.", + member->port_id); - TEST_ASSERT_EQUAL(rte_ring_count(slave->rx_queue), 0, - "Slave %u tx queue not empty while removing from bonding.", - slave->port_id); + TEST_ASSERT_EQUAL(rte_ring_count(member->rx_queue), 0, + "Member %u tx queue not empty while removing from bonding.", + member->port_id); - TEST_ASSERT_EQUAL(rte_eth_bond_slave_remove(test_params.bonded_port_id, - slave->port_id), 0, - "Failed to remove slave (idx=%u, id=%u) from bonding (id=%u)", - (uint8_t)slave_idx, slave->port_id, - test_params.bonded_port_id); + TEST_ASSERT_EQUAL(rte_eth_bond_member_remove(test_params.bonding_port_id, + member->port_id), 0, + "Failed to remove member (idx=%u, id=%u) from bonding (id=%u)", + (uint8_t)member_idx, member->port_id, + test_params.bonding_port_id); - slave->bonded = 0; - slave->lacp_parnter_state = 0; + member->bonding = 0; + member->lacp_parnter_state = 0; return 0; } static void -lacp_recv_cb(uint16_t slave_id, struct rte_mbuf *lacp_pkt) +lacp_recv_cb(uint16_t member_id, struct rte_mbuf *lacp_pkt) { struct rte_ether_hdr *hdr; struct slow_protocol_frame *slow_hdr; @@ -304,75 +304,75 @@ lacp_recv_cb(uint16_t slave_id, struct rte_mbuf *lacp_pkt) slow_hdr = rte_pktmbuf_mtod(lacp_pkt, struct slow_protocol_frame *); RTE_VERIFY(slow_hdr->slow_protocol.subtype == SLOW_SUBTYPE_LACP); - lacpdu_rx_count[slave_id]++; + lacpdu_rx_count[member_id]++; rte_pktmbuf_free(lacp_pkt); } static int -initialize_bonded_device_with_slaves(uint16_t slave_count, uint8_t external_sm) +initialize_bonding_device_with_members(uint16_t member_count, uint8_t external_sm) { uint8_t i; int ret; - RTE_VERIFY(test_params.bonded_port_id != INVALID_PORT_ID); + RTE_VERIFY(test_params.bonding_port_id != INVALID_PORT_ID); - for (i = 0; i < slave_count; i++) { - TEST_ASSERT_SUCCESS(add_slave(&test_params.slave_ports[i], 1), - "Failed to add port %u to bonded device.\n", - test_params.slave_ports[i].port_id); + for (i = 0; i < member_count; i++) { + TEST_ASSERT_SUCCESS(add_member(&test_params.member_ports[i], 1), + "Failed to add port %u to bonding device.\n", + test_params.member_ports[i].port_id); } /* Reset mode 4 configuration */ - rte_eth_bond_8023ad_setup(test_params.bonded_port_id, NULL); - ret = rte_eth_promiscuous_disable(test_params.bonded_port_id); + rte_eth_bond_8023ad_setup(test_params.bonding_port_id, NULL); + ret = rte_eth_promiscuous_disable(test_params.bonding_port_id); TEST_ASSERT_SUCCESS(ret, "Failed disable promiscuous mode for port %d: %s", - test_params.bonded_port_id, rte_strerror(-ret)); + test_params.bonding_port_id, rte_strerror(-ret)); if (external_sm) { struct rte_eth_bond_8023ad_conf conf; - rte_eth_bond_8023ad_conf_get(test_params.bonded_port_id, &conf); + 
rte_eth_bond_8023ad_conf_get(test_params.bonding_port_id, &conf);
 		conf.slowrx_cb = lacp_recv_cb;
-		rte_eth_bond_8023ad_setup(test_params.bonded_port_id, &conf);
+		rte_eth_bond_8023ad_setup(test_params.bonding_port_id, &conf);
 	}
 
-	TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params.bonded_port_id),
-		"Failed to start bonded device");
+	TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params.bonding_port_id),
+		"Failed to start bonding device");
 
 	return TEST_SUCCESS;
 }
 
 static int
-remove_slaves_and_stop_bonded_device(void)
+remove_members_and_stop_bonding_device(void)
 {
-	struct slave_conf *slave;
+	struct member_conf *member;
 	int retval;
-	uint16_t slaves[RTE_MAX_ETHPORTS];
+	uint16_t members[RTE_MAX_ETHPORTS];
 	uint16_t i;
 
-	TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params.bonded_port_id),
-		"Failed to stop bonded port %u",
-		test_params.bonded_port_id);
+	TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params.bonding_port_id),
+		"Failed to stop bonding port %u",
+		test_params.bonding_port_id);
 
-	FOR_EACH_SLAVE(i, slave)
-		remove_slave(slave);
+	FOR_EACH_MEMBER(i, member)
+		remove_member(member);
 
-	retval = rte_eth_bond_slaves_get(test_params.bonded_port_id, slaves,
-		RTE_DIM(slaves));
+	retval = rte_eth_bond_members_get(test_params.bonding_port_id, members,
+		RTE_DIM(members));
 
 	TEST_ASSERT_EQUAL(retval, 0,
-		"Expected bonded device %u have 0 slaves but returned %d.",
-		test_params.bonded_port_id, retval);
+		"Expected bonding device %u to have 0 members but returned %d.",
+		test_params.bonding_port_id, retval);
 
-	FOR_EACH_PORT(i, slave) {
-		TEST_ASSERT_SUCCESS(rte_eth_dev_stop(slave->port_id),
-			"Failed to stop bonded port %u",
-			slave->port_id);
+	FOR_EACH_PORT(i, member) {
+		TEST_ASSERT_SUCCESS(rte_eth_dev_stop(member->port_id),
+			"Failed to stop bonding port %u",
+			member->port_id);
 
-		TEST_ASSERT(slave->bonded == 0,
-			"Port id=%u is still marked as enslaved.", slave->port_id);
+		TEST_ASSERT(member->bonding == 0,
+			"Port id=%u is still marked as a bonding member.", member->port_id);
 	}
 
 	return TEST_SUCCESS;
@@ -383,7 +383,7 @@ test_setup(void)
 {
 	int retval, nb_mbuf_per_pool;
 	char name[RTE_ETH_NAME_MAX_LEN];
-	struct slave_conf *port;
+	struct member_conf *port;
 	const uint8_t socket_id = rte_socket_id();
 	uint16_t i;
 
@@ -400,10 +400,10 @@ test_setup(void)
 	/* Create / initialize ring eth devs.
*/ FOR_EACH_PORT(i, port) { - port = &test_params.slave_ports[i]; + port = &test_params.member_ports[i]; if (port->rx_queue == NULL) { - retval = snprintf(name, RTE_DIM(name), SLAVE_RX_QUEUE_FMT, i); + retval = snprintf(name, RTE_DIM(name), MEMBER_RX_QUEUE_FMT, i); TEST_ASSERT(retval <= (int)RTE_DIM(name) - 1, "Name too long"); port->rx_queue = rte_ring_create(name, RX_RING_SIZE, socket_id, 0); TEST_ASSERT(port->rx_queue != NULL, @@ -412,7 +412,7 @@ test_setup(void) } if (port->tx_queue == NULL) { - retval = snprintf(name, RTE_DIM(name), SLAVE_TX_QUEUE_FMT, i); + retval = snprintf(name, RTE_DIM(name), MEMBER_TX_QUEUE_FMT, i); TEST_ASSERT(retval <= (int)RTE_DIM(name) - 1, "Name too long"); port->tx_queue = rte_ring_create(name, TX_RING_SIZE, socket_id, 0); TEST_ASSERT_NOT_NULL(port->tx_queue, @@ -421,7 +421,7 @@ test_setup(void) } if (port->port_id == INVALID_PORT_ID) { - retval = snprintf(name, RTE_DIM(name), SLAVE_DEV_NAME_FMT, i); + retval = snprintf(name, RTE_DIM(name), MEMBER_DEV_NAME_FMT, i); TEST_ASSERT(retval < (int)RTE_DIM(name) - 1, "Name too long"); retval = rte_eth_from_rings(name, &port->rx_queue, 1, &port->tx_queue, 1, socket_id); @@ -436,22 +436,22 @@ test_setup(void) name); } - if (test_params.bonded_port_id == INVALID_PORT_ID) { - retval = rte_eth_bond_create(BONDED_DEV_NAME, BONDING_MODE_8023AD, + if (test_params.bonding_port_id == INVALID_PORT_ID) { + retval = rte_eth_bond_create(BONDING_DEV_NAME, BONDING_MODE_8023AD, socket_id); - TEST_ASSERT(retval >= 0, "Failed to create bonded ethdev %s", - BONDED_DEV_NAME); + TEST_ASSERT(retval >= 0, "Failed to create bonding ethdev %s", + BONDING_DEV_NAME); - test_params.bonded_port_id = retval; - TEST_ASSERT_SUCCESS(configure_ethdev(test_params.bonded_port_id, 0), - "Failed to configure bonded ethdev %s", BONDED_DEV_NAME); - } else if (rte_eth_bond_mode_get(test_params.bonded_port_id) != + test_params.bonding_port_id = retval; + TEST_ASSERT_SUCCESS(configure_ethdev(test_params.bonding_port_id, 0), + "Failed to configure bonding ethdev %s", BONDING_DEV_NAME); + } else if (rte_eth_bond_mode_get(test_params.bonding_port_id) != BONDING_MODE_8023AD) { - TEST_ASSERT(rte_eth_bond_mode_set(test_params.bonded_port_id, + TEST_ASSERT(rte_eth_bond_mode_set(test_params.bonding_port_id, BONDING_MODE_8023AD) == 0, "Failed to set ethdev %d to mode %d", - test_params.bonded_port_id, BONDING_MODE_8023AD); + test_params.bonding_port_id, BONDING_MODE_8023AD); } return 0; @@ -460,14 +460,14 @@ test_setup(void) static void testsuite_teardown(void) { - struct slave_conf *port; + struct member_conf *port; uint8_t i; /* Only stop ports. * Any cleanup/reset state is done when particular test is * started. 
*/ - rte_eth_dev_stop(test_params.bonded_port_id); + rte_eth_dev_stop(test_params.bonding_port_id); FOR_EACH_PORT(i, port) rte_eth_dev_stop(port->port_id); @@ -480,7 +480,7 @@ testsuite_teardown(void) * frame but not LACP */ static int -make_lacp_reply(struct slave_conf *slave, struct rte_mbuf *pkt) +make_lacp_reply(struct member_conf *member, struct rte_mbuf *pkt) { struct rte_ether_hdr *hdr; struct slow_protocol_frame *slow_hdr; @@ -501,11 +501,11 @@ make_lacp_reply(struct slave_conf *slave, struct rte_mbuf *pkt) /* Change source address to partner address */ rte_ether_addr_copy(&parnter_mac_default, &slow_hdr->eth_hdr.src_addr); slow_hdr->eth_hdr.src_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = - slave->port_id; + member->port_id; lacp = (struct lacpdu *) &slow_hdr->slow_protocol; /* Save last received state */ - slave->lacp_parnter_state = lacp->actor.state; + member->lacp_parnter_state = lacp->actor.state; /* Change it into LACP replay by matching parameters. */ memcpy(&lacp->partner.port_params, &lacp->actor.port_params, sizeof(struct port_params)); @@ -523,27 +523,27 @@ make_lacp_reply(struct slave_conf *slave, struct rte_mbuf *pkt) } /* - * Reads packets from given slave, search for LACP packet and reply them. + * Reads packets from given member, search for LACP packet and reply them. * - * Receives burst of packets from slave. Looks for LACP packet. Drops + * Receives burst of packets from member. Looks for LACP packet. Drops * all other packets. Prepares response LACP and sends it back. * * return number of LACP received and replied, -1 on error. */ static int -bond_handshake_reply(struct slave_conf *slave) +bond_handshake_reply(struct member_conf *member) { int retval; struct rte_mbuf *rx_buf[MAX_PKT_BURST]; struct rte_mbuf *lacp_tx_buf[MAX_PKT_BURST]; uint16_t lacp_tx_buf_cnt = 0, i; - retval = slave_get_pkts(slave, rx_buf, RTE_DIM(rx_buf)); - TEST_ASSERT(retval >= 0, "Getting slave %u packets failed.", - slave->port_id); + retval = member_get_pkts(member, rx_buf, RTE_DIM(rx_buf)); + TEST_ASSERT(retval >= 0, "Getting member %u packets failed.", + member->port_id); for (i = 0; i < (uint16_t)retval; i++) { - if (make_lacp_reply(slave, rx_buf[i]) == 0) { + if (make_lacp_reply(member, rx_buf[i]) == 0) { /* reply with actor's LACP */ lacp_tx_buf[lacp_tx_buf_cnt++] = rx_buf[i]; } else @@ -553,7 +553,7 @@ bond_handshake_reply(struct slave_conf *slave) if (lacp_tx_buf_cnt == 0) return 0; - retval = slave_put_pkts(slave, lacp_tx_buf, lacp_tx_buf_cnt); + retval = member_put_pkts(member, lacp_tx_buf, lacp_tx_buf_cnt); if (retval <= lacp_tx_buf_cnt) { /* retval might be negative */ for (i = RTE_MAX(0, retval); retval < lacp_tx_buf_cnt; retval++) @@ -561,24 +561,24 @@ bond_handshake_reply(struct slave_conf *slave) } TEST_ASSERT_EQUAL(retval, lacp_tx_buf_cnt, - "Failed to equeue lacp packets into slave %u tx queue.", - slave->port_id); + "Failed to equeue lacp packets into member %u tx queue.", + member->port_id); return lacp_tx_buf_cnt; } /* - * Function check if given slave tx queue contains packets that make mode 4 - * handshake complete. It will drain slave queue. + * Function check if given member tx queue contains packets that make mode 4 + * handshake complete. It will drain member queue. 
* return 0 if handshake not completed, 1 if handshake was complete, */ static int -bond_handshake_done(struct slave_conf *slave) +bond_handshake_done(struct member_conf *member) { const uint8_t expected_state = STATE_LACP_ACTIVE | STATE_SYNCHRONIZATION | STATE_AGGREGATION | STATE_COLLECTING | STATE_DISTRIBUTING; - return slave->lacp_parnter_state == expected_state; + return member->lacp_parnter_state == expected_state; } static unsigned @@ -586,7 +586,7 @@ bond_get_update_timeout_ms(void) { struct rte_eth_bond_8023ad_conf conf; - if (rte_eth_bond_8023ad_conf_get(test_params.bonded_port_id, &conf) < 0) { + if (rte_eth_bond_8023ad_conf_get(test_params.bonding_port_id, &conf) < 0) { RTE_LOG(DEBUG, EAL, "Failed to get bonding configuration: " "%s at %d\n", __func__, __LINE__); RTE_TEST_TRACE_FAILURE(__FILE__, __LINE__, __func__); @@ -603,32 +603,32 @@ bond_get_update_timeout_ms(void) static int bond_handshake(void) { - struct slave_conf *slave; + struct member_conf *member; struct rte_mbuf *buf[MAX_PKT_BURST]; uint16_t nb_pkts; - uint8_t all_slaves_done, i, j; - uint8_t status[RTE_DIM(test_params.slave_ports)] = { 0 }; + uint8_t all_members_done, i, j; + uint8_t status[RTE_DIM(test_params.member_ports)] = { 0 }; const unsigned delay = bond_get_update_timeout_ms(); /* Exchange LACP frames */ - all_slaves_done = 0; - for (i = 0; i < 30 && all_slaves_done == 0; ++i) { + all_members_done = 0; + for (i = 0; i < 30 && all_members_done == 0; ++i) { rte_delay_ms(delay); - all_slaves_done = 1; - FOR_EACH_SLAVE(j, slave) { - /* If response already send, skip slave */ + all_members_done = 1; + FOR_EACH_MEMBER(j, member) { + /* If response already send, skip member */ if (status[j] != 0) continue; - if (bond_handshake_reply(slave) < 0) { - all_slaves_done = 0; + if (bond_handshake_reply(member) < 0) { + all_members_done = 0; break; } - status[j] = bond_handshake_done(slave); + status[j] = bond_handshake_done(member); if (status[j] == 0) - all_slaves_done = 0; + all_members_done = 0; } nb_pkts = bond_tx(NULL, 0); @@ -639,26 +639,26 @@ bond_handshake(void) TEST_ASSERT_EQUAL(nb_pkts, 0, "Packets received unexpectedly"); } /* If response didn't send - report failure */ - TEST_ASSERT_EQUAL(all_slaves_done, 1, "Bond handshake failed\n"); + TEST_ASSERT_EQUAL(all_members_done, 1, "Bond handshake failed\n"); /* If flags doesn't match - report failure */ - return all_slaves_done == 1 ? TEST_SUCCESS : TEST_FAILED; + return all_members_done == 1 ? 
TEST_SUCCESS : TEST_FAILED; } -#define TEST_LACP_SLAVE_COUT RTE_DIM(test_params.slave_ports) +#define TEST_LACP_MEMBER_COUT RTE_DIM(test_params.member_ports) static int test_mode4_lacp(void) { int retval; - retval = initialize_bonded_device_with_slaves(TEST_LACP_SLAVE_COUT, 0); - TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device"); + retval = initialize_bonding_device_with_members(TEST_LACP_MEMBER_COUT, 0); + TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonding device"); /* Test LACP handshake function */ retval = bond_handshake(); TEST_ASSERT_SUCCESS(retval, "Initial handshake failed"); - retval = remove_slaves_and_stop_bonded_device(); + retval = remove_members_and_stop_bonding_device(); TEST_ASSERT_SUCCESS(retval, "Test cleanup failed."); return TEST_SUCCESS; @@ -668,33 +668,33 @@ test_mode4_agg_mode_selection(void) { int retval; /* Test and verify for Stable mode */ - retval = initialize_bonded_device_with_slaves(TEST_LACP_SLAVE_COUT, 0); - TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device"); + retval = initialize_bonding_device_with_members(TEST_LACP_MEMBER_COUT, 0); + TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonding device"); retval = rte_eth_bond_8023ad_agg_selection_set( - test_params.bonded_port_id, AGG_STABLE); + test_params.bonding_port_id, AGG_STABLE); TEST_ASSERT_SUCCESS(retval, "Failed to initialize bond aggregation mode"); retval = bond_handshake(); TEST_ASSERT_SUCCESS(retval, "Initial handshake failed"); retval = rte_eth_bond_8023ad_agg_selection_get( - test_params.bonded_port_id); + test_params.bonding_port_id); TEST_ASSERT_EQUAL(retval, AGG_STABLE, "Wrong agg mode received from bonding device"); - retval = remove_slaves_and_stop_bonded_device(); + retval = remove_members_and_stop_bonding_device(); TEST_ASSERT_SUCCESS(retval, "Test cleanup failed."); /* test and verify for Bandwidth mode */ - retval = initialize_bonded_device_with_slaves(TEST_LACP_SLAVE_COUT, 0); - TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device"); + retval = initialize_bonding_device_with_members(TEST_LACP_MEMBER_COUT, 0); + TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonding device"); retval = rte_eth_bond_8023ad_agg_selection_set( - test_params.bonded_port_id, + test_params.bonding_port_id, AGG_BANDWIDTH); TEST_ASSERT_SUCCESS(retval, "Failed to initialize bond aggregation mode"); @@ -702,31 +702,31 @@ test_mode4_agg_mode_selection(void) TEST_ASSERT_SUCCESS(retval, "Initial handshake failed"); retval = rte_eth_bond_8023ad_agg_selection_get( - test_params.bonded_port_id); + test_params.bonding_port_id); TEST_ASSERT_EQUAL(retval, AGG_BANDWIDTH, "Wrong agg mode received from bonding device"); - retval = remove_slaves_and_stop_bonded_device(); + retval = remove_members_and_stop_bonding_device(); TEST_ASSERT_SUCCESS(retval, "Test cleanup failed."); /* test and verify selection for count mode */ - retval = initialize_bonded_device_with_slaves(TEST_LACP_SLAVE_COUT, 0); - TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device"); + retval = initialize_bonding_device_with_members(TEST_LACP_MEMBER_COUT, 0); + TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonding device"); retval = rte_eth_bond_8023ad_agg_selection_set( - test_params.bonded_port_id, AGG_COUNT); + test_params.bonding_port_id, AGG_COUNT); TEST_ASSERT_SUCCESS(retval, "Failed to initialize bond aggregation mode"); retval = bond_handshake(); TEST_ASSERT_SUCCESS(retval, "Initial handshake failed"); retval = rte_eth_bond_8023ad_agg_selection_get( - test_params.bonded_port_id); + 
test_params.bonding_port_id); TEST_ASSERT_EQUAL(retval, AGG_COUNT, "Wrong agg mode received from bonding device"); - retval = remove_slaves_and_stop_bonded_device(); + retval = remove_members_and_stop_bonding_device(); TEST_ASSERT_SUCCESS(retval, "Test cleanup failed."); return TEST_SUCCESS; @@ -780,7 +780,7 @@ generate_packets(struct rte_ether_addr *src_mac, } static int -generate_and_put_packets(struct slave_conf *slave, +generate_and_put_packets(struct member_conf *member, struct rte_ether_addr *src_mac, struct rte_ether_addr *dst_mac, uint16_t count) { @@ -791,12 +791,12 @@ generate_and_put_packets(struct slave_conf *slave, if (retval != (int)count) return retval; - retval = slave_put_pkts(slave, pkts, count); + retval = member_put_pkts(member, pkts, count); if (retval > 0 && retval != count) free_pkts(&pkts[retval], count - retval); TEST_ASSERT_EQUAL(retval, count, - "Failed to enqueue packets into slave %u RX queue", slave->port_id); + "Failed to enqueue packets into member %u RX queue", member->port_id); return TEST_SUCCESS; } @@ -804,7 +804,7 @@ generate_and_put_packets(struct slave_conf *slave, static int test_mode4_rx(void) { - struct slave_conf *slave; + struct member_conf *member; uint16_t i, j; uint16_t expected_pkts_cnt; @@ -817,19 +817,19 @@ test_mode4_rx(void) struct rte_ether_addr src_mac = { { 0x00, 0xFF, 0x00, 0xFF, 0x00, 0x00 } }; struct rte_ether_addr dst_mac; - struct rte_ether_addr bonded_mac; + struct rte_ether_addr bonding_mac; - retval = initialize_bonded_device_with_slaves(TEST_PROMISC_SLAVE_COUNT, + retval = initialize_bonding_device_with_members(TEST_PROMISC_MEMBER_COUNT, 0); - TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device"); + TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonding device"); retval = bond_handshake(); TEST_ASSERT_SUCCESS(retval, "Initial handshake failed"); - retval = rte_eth_macaddr_get(test_params.bonded_port_id, &bonded_mac); + retval = rte_eth_macaddr_get(test_params.bonding_port_id, &bonding_mac); TEST_ASSERT_SUCCESS(retval, "Failed to get mac address: %s", strerror(-retval)); - rte_ether_addr_copy(&bonded_mac, &dst_mac); + rte_ether_addr_copy(&bonding_mac, &dst_mac); /* Assert that dst address is not bonding address. Do not set the * least significant bit of the zero byte as this would create a @@ -838,28 +838,28 @@ test_mode4_rx(void) dst_mac.addr_bytes[0] += 2; /* First try with promiscuous mode enabled. - * Add 2 packets to each slave. First with bonding MAC address, second with + * Add 2 packets to each member. First with bonding MAC address, second with * different. Check if we received all of them. 
*/ - retval = rte_eth_promiscuous_enable(test_params.bonded_port_id); + retval = rte_eth_promiscuous_enable(test_params.bonding_port_id); TEST_ASSERT_SUCCESS(retval, "Failed to enable promiscuous mode for port %d: %s", - test_params.bonded_port_id, rte_strerror(-retval)); + test_params.bonding_port_id, rte_strerror(-retval)); expected_pkts_cnt = 0; - FOR_EACH_SLAVE(i, slave) { - retval = generate_and_put_packets(slave, &src_mac, &bonded_mac, 1); - TEST_ASSERT_SUCCESS(retval, "Failed to enqueue packets to slave %u", - slave->port_id); + FOR_EACH_MEMBER(i, member) { + retval = generate_and_put_packets(member, &src_mac, &bonding_mac, 1); + TEST_ASSERT_SUCCESS(retval, "Failed to enqueue packets to member %u", + member->port_id); - retval = generate_and_put_packets(slave, &src_mac, &dst_mac, 1); - TEST_ASSERT_SUCCESS(retval, "Failed to enqueue packets to slave %u", - slave->port_id); + retval = generate_and_put_packets(member, &src_mac, &dst_mac, 1); + TEST_ASSERT_SUCCESS(retval, "Failed to enqueue packets to member %u", + member->port_id); - /* Expect 2 packets per slave */ + /* Expect 2 packets per member */ expected_pkts_cnt += 2; } - retval = rte_eth_rx_burst(test_params.bonded_port_id, 0, pkts, + retval = rte_eth_rx_burst(test_params.bonding_port_id, 0, pkts, RTE_DIM(pkts)); if (retval == expected_pkts_cnt) { @@ -868,7 +868,7 @@ test_mode4_rx(void) for (i = 0; i < expected_pkts_cnt; i++) { hdr = rte_pktmbuf_mtod(pkts[i], struct rte_ether_hdr *); cnt[rte_is_same_ether_addr(&hdr->dst_addr, - &bonded_mac)]++; + &bonding_mac)]++; } free_pkts(pkts, expected_pkts_cnt); @@ -888,26 +888,26 @@ test_mode4_rx(void) /* Now, disable promiscuous mode. When promiscuous mode is disabled we * expect to receive only packets that are directed to bonding port. */ - retval = rte_eth_promiscuous_disable(test_params.bonded_port_id); + retval = rte_eth_promiscuous_disable(test_params.bonding_port_id); TEST_ASSERT_SUCCESS(retval, "Failed to disable promiscuous mode for port %d: %s", - test_params.bonded_port_id, rte_strerror(-retval)); + test_params.bonding_port_id, rte_strerror(-retval)); expected_pkts_cnt = 0; - FOR_EACH_SLAVE(i, slave) { - retval = generate_and_put_packets(slave, &src_mac, &bonded_mac, 1); - TEST_ASSERT_SUCCESS(retval, "Failed to enqueue packets to slave %u", - slave->port_id); + FOR_EACH_MEMBER(i, member) { + retval = generate_and_put_packets(member, &src_mac, &bonding_mac, 1); + TEST_ASSERT_SUCCESS(retval, "Failed to enqueue packets to member %u", + member->port_id); - retval = generate_and_put_packets(slave, &src_mac, &dst_mac, 1); - TEST_ASSERT_SUCCESS(retval, "Failed to enqueue packets to slave %u", - slave->port_id); + retval = generate_and_put_packets(member, &src_mac, &dst_mac, 1); + TEST_ASSERT_SUCCESS(retval, "Failed to enqueue packets to member %u", + member->port_id); - /* Expect only one packet per slave */ + /* Expect only one packet per member */ expected_pkts_cnt += 1; } - retval = rte_eth_rx_burst(test_params.bonded_port_id, 0, pkts, + retval = rte_eth_rx_burst(test_params.bonding_port_id, 0, pkts, RTE_DIM(pkts)); if (retval == expected_pkts_cnt) { @@ -916,7 +916,7 @@ test_mode4_rx(void) for (i = 0; i < expected_pkts_cnt; i++) { hdr = rte_pktmbuf_mtod(pkts[i], struct rte_ether_hdr *); eq_cnt += rte_is_same_ether_addr(&hdr->dst_addr, - &bonded_mac); + &bonding_mac); } free_pkts(pkts, expected_pkts_cnt); @@ -927,19 +927,19 @@ test_mode4_rx(void) TEST_ASSERT_EQUAL(retval, expected_pkts_cnt, "Expected %u packets but received only %d", expected_pkts_cnt, retval); - /* Link down 
test: simulate link down for first slave. */ + /* Link down test: simulate link down for first member. */ delay = bond_get_update_timeout_ms(); - uint8_t slave_down_id = INVALID_PORT_ID; + uint8_t member_down_id = INVALID_PORT_ID; - /* Find first slave and make link down on it*/ - FOR_EACH_SLAVE(i, slave) { - rte_eth_dev_set_link_down(slave->port_id); - slave_down_id = slave->port_id; + /* Find first member and make link down on it*/ + FOR_EACH_MEMBER(i, member) { + rte_eth_dev_set_link_down(member->port_id); + member_down_id = member->port_id; break; } - RTE_VERIFY(slave_down_id != INVALID_PORT_ID); + RTE_VERIFY(member_down_id != INVALID_PORT_ID); /* Give some time to rearrange bonding */ for (i = 0; i < 3; i++) { @@ -949,16 +949,16 @@ test_mode4_rx(void) TEST_ASSERT_SUCCESS(bond_handshake(), "Handshake after link down failed"); - /* Put packet to each slave */ - FOR_EACH_SLAVE(i, slave) { + /* Put packet to each member */ + FOR_EACH_MEMBER(i, member) { void *pkt = NULL; - dst_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = slave->port_id; - retval = generate_and_put_packets(slave, &src_mac, &dst_mac, 1); + dst_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = member->port_id; + retval = generate_and_put_packets(member, &src_mac, &dst_mac, 1); TEST_ASSERT_SUCCESS(retval, "Failed to generate test packet burst."); - src_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = slave->port_id; - retval = generate_and_put_packets(slave, &src_mac, &bonded_mac, 1); + src_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = member->port_id; + retval = generate_and_put_packets(member, &src_mac, &bonding_mac, 1); TEST_ASSERT_SUCCESS(retval, "Failed to generate test packet burst."); retval = bond_rx(pkts, RTE_DIM(pkts)); @@ -967,36 +967,36 @@ test_mode4_rx(void) if (retval > 0) free_pkts(pkts, retval); - while (rte_ring_dequeue(slave->rx_queue, (void **)&pkt) == 0) + while (rte_ring_dequeue(member->rx_queue, (void **)&pkt) == 0) rte_pktmbuf_free(pkt); - if (slave_down_id == slave->port_id) + if (member_down_id == member->port_id) TEST_ASSERT_EQUAL(retval, 0, "Packets received unexpectedly."); else TEST_ASSERT_NOT_EQUAL(retval, 0, - "Expected to receive some packets on slave %u.", - slave->port_id); - rte_eth_dev_start(slave->port_id); + "Expected to receive some packets on member %u.", + member->port_id); + rte_eth_dev_start(member->port_id); for (j = 0; j < 5; j++) { - TEST_ASSERT(bond_handshake_reply(slave) >= 0, + TEST_ASSERT(bond_handshake_reply(member) >= 0, "Handshake after link up"); - if (bond_handshake_done(slave) == 1) + if (bond_handshake_done(member) == 1) break; } - TEST_ASSERT(j < 5, "Failed to aggregate slave after link up"); + TEST_ASSERT(j < 5, "Failed to aggregate member after link up"); } - remove_slaves_and_stop_bonded_device(); + remove_members_and_stop_bonding_device(); return TEST_SUCCESS; } static int test_mode4_tx_burst(void) { - struct slave_conf *slave; + struct member_conf *member; uint16_t i, j; uint16_t exp_pkts_cnt, pkts_cnt = 0; @@ -1006,21 +1006,21 @@ test_mode4_tx_burst(void) struct rte_ether_addr dst_mac = { { 0x00, 0xFF, 0x00, 0xFF, 0x00, 0x00 } }; - struct rte_ether_addr bonded_mac; + struct rte_ether_addr bonding_mac; - retval = initialize_bonded_device_with_slaves(TEST_TX_SLAVE_COUNT, 0); - TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device"); + retval = initialize_bonding_device_with_members(TEST_TX_MEMBER_COUNT, 0); + TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonding device"); retval = bond_handshake(); TEST_ASSERT_SUCCESS(retval, "Initial handshake failed"); - retval = 
rte_eth_macaddr_get(test_params.bonded_port_id, &bonded_mac); + retval = rte_eth_macaddr_get(test_params.bonding_port_id, &bonding_mac); TEST_ASSERT_SUCCESS(retval, "Failed to get mac address: %s", strerror(-retval)); /* Prepare burst */ for (pkts_cnt = 0; pkts_cnt < RTE_DIM(pkts); pkts_cnt++) { dst_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = pkts_cnt; - retval = generate_packets(&bonded_mac, &dst_mac, 1, &pkts[pkts_cnt]); + retval = generate_packets(&bonding_mac, &dst_mac, 1, &pkts[pkts_cnt]); if (retval != 1) free_pkts(pkts, pkts_cnt); @@ -1029,26 +1029,26 @@ test_mode4_tx_burst(void) } exp_pkts_cnt = pkts_cnt; - /* Transmit packets on bonded device */ + /* Transmit packets on bonding device */ retval = bond_tx(pkts, pkts_cnt); if (retval > 0 && retval < pkts_cnt) free_pkts(&pkts[retval], pkts_cnt - retval); - TEST_ASSERT_EQUAL(retval, pkts_cnt, "TX on bonded device failed"); + TEST_ASSERT_EQUAL(retval, pkts_cnt, "TX on bonding device failed"); - /* Check if packets were transmitted properly. Every slave should have + /* Check if packets were transmitted properly. Every member should have * at least one packet, and sum must match. Under normal operation * there should be no LACP nor MARKER frames. */ pkts_cnt = 0; - FOR_EACH_SLAVE(i, slave) { + FOR_EACH_MEMBER(i, member) { uint16_t normal_cnt, slow_cnt; - retval = slave_get_pkts(slave, pkts, RTE_DIM(pkts)); + retval = member_get_pkts(member, pkts, RTE_DIM(pkts)); normal_cnt = 0; slow_cnt = 0; for (j = 0; j < retval; j++) { - if (make_lacp_reply(slave, pkts[j]) == 1) + if (make_lacp_reply(member, pkts[j]) == 1) normal_cnt++; else slow_cnt++; @@ -1056,11 +1056,11 @@ test_mode4_tx_burst(void) free_pkts(pkts, normal_cnt + slow_cnt); TEST_ASSERT_EQUAL(slow_cnt, 0, - "slave %u unexpectedly transmitted %d SLOW packets", slave->port_id, + "member %u unexpectedly transmitted %d SLOW packets", member->port_id, slow_cnt); TEST_ASSERT_NOT_EQUAL(normal_cnt, 0, - "slave %u did not transmitted any packets", slave->port_id); + "member %u did not transmitted any packets", member->port_id); pkts_cnt += normal_cnt; } @@ -1068,19 +1068,21 @@ test_mode4_tx_burst(void) TEST_ASSERT_EQUAL(exp_pkts_cnt, pkts_cnt, "Expected %u packets but transmitted only %d", exp_pkts_cnt, pkts_cnt); - /* Link down test: - * simulate link down for first slave. */ + /* + * Link down test: + * simulate link down for first member. + */ delay = bond_get_update_timeout_ms(); - uint8_t slave_down_id = INVALID_PORT_ID; + uint8_t member_down_id = INVALID_PORT_ID; - FOR_EACH_SLAVE(i, slave) { - rte_eth_dev_set_link_down(slave->port_id); - slave_down_id = slave->port_id; + FOR_EACH_MEMBER(i, member) { + rte_eth_dev_set_link_down(member->port_id); + member_down_id = member->port_id; break; } - RTE_VERIFY(slave_down_id != INVALID_PORT_ID); + RTE_VERIFY(member_down_id != INVALID_PORT_ID); /* Give some time to rearrange bonding. */ for (i = 0; i < 3; i++) { @@ -1093,7 +1095,7 @@ test_mode4_tx_burst(void) /* Prepare burst. */ for (pkts_cnt = 0; pkts_cnt < RTE_DIM(pkts); pkts_cnt++) { dst_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = pkts_cnt; - retval = generate_packets(&bonded_mac, &dst_mac, 1, &pkts[pkts_cnt]); + retval = generate_packets(&bonding_mac, &dst_mac, 1, &pkts[pkts_cnt]); if (retval != 1) free_pkts(pkts, pkts_cnt); @@ -1103,26 +1105,26 @@ test_mode4_tx_burst(void) } exp_pkts_cnt = pkts_cnt; - /* Transmit packets on bonded device. */ + /* Transmit packets on bonding device. 
*/
 	retval = bond_tx(pkts, pkts_cnt);
 	if (retval > 0 && retval < pkts_cnt)
 		free_pkts(&pkts[retval], pkts_cnt - retval);
 
-	TEST_ASSERT_EQUAL(retval, pkts_cnt, "TX on bonded device failed");
+	TEST_ASSERT_EQUAL(retval, pkts_cnt, "TX on bonding device failed");
 
-	/* Check if packets was transmitted properly. Every slave should have
+	/* Check if packets were transmitted properly. Every member should have
 	 * at least one packet, and sum must match. Under normal operation
 	 * there should be no LACP nor MARKER frames.
 	 */
 	pkts_cnt = 0;
-	FOR_EACH_SLAVE(i, slave) {
+	FOR_EACH_MEMBER(i, member) {
 		uint16_t normal_cnt, slow_cnt;
 
-		retval = slave_get_pkts(slave, pkts, RTE_DIM(pkts));
+		retval = member_get_pkts(member, pkts, RTE_DIM(pkts));
 		normal_cnt = 0;
 		slow_cnt = 0;
 
 		for (j = 0; j < retval; j++) {
-			if (make_lacp_reply(slave, pkts[j]) == 1)
+			if (make_lacp_reply(member, pkts[j]) == 1)
 				normal_cnt++;
 			else
 				slow_cnt++;
@@ -1130,17 +1132,17 @@
 		free_pkts(pkts, normal_cnt + slow_cnt);
 
-		if (slave_down_id == slave->port_id) {
+		if (member_down_id == member->port_id) {
 			TEST_ASSERT_EQUAL(normal_cnt + slow_cnt, 0,
-				"slave %u enexpectedly transmitted %u packets",
-				normal_cnt + slow_cnt, slave->port_id);
+				"member %u unexpectedly transmitted %u packets",
+				member->port_id, normal_cnt + slow_cnt);
 		} else {
 			TEST_ASSERT_EQUAL(slow_cnt, 0,
-				"slave %u unexpectedly transmitted %d SLOW packets",
-				slave->port_id, slow_cnt);
+				"member %u unexpectedly transmitted %d SLOW packets",
+				member->port_id, slow_cnt);
 
 			TEST_ASSERT_NOT_EQUAL(normal_cnt, 0,
-				"slave %u did not transmitted any packets", slave->port_id);
+				"member %u did not transmit any packets", member->port_id);
 		}
 
 		pkts_cnt += normal_cnt;
@@ -1149,11 +1151,11 @@
 	TEST_ASSERT_EQUAL(exp_pkts_cnt, pkts_cnt,
 		"Expected %u packets but transmitted only %d", exp_pkts_cnt, pkts_cnt);
 
-	return remove_slaves_and_stop_bonded_device();
+	return remove_members_and_stop_bonding_device();
 }
 
 static void
-init_marker(struct rte_mbuf *pkt, struct slave_conf *slave)
+init_marker(struct rte_mbuf *pkt, struct member_conf *member)
 {
 	struct marker_header *marker_hdr = rte_pktmbuf_mtod(pkt,
 			struct marker_header *);
@@ -1166,7 +1168,7 @@ init_marker(struct rte_mbuf *pkt, struct slave_conf *slave)
 	rte_ether_addr_copy(&parnter_mac_default, &marker_hdr->eth_hdr.src_addr);
 	marker_hdr->eth_hdr.src_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
-		slave->port_id;
+		member->port_id;
 
 	marker_hdr->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW);
@@ -1177,7 +1179,7 @@ init_marker(struct rte_mbuf *pkt, struct slave_conf *slave)
 			offsetof(struct marker, reserved_90) -
 			offsetof(struct marker, requester_port);
 	RTE_VERIFY(marker_hdr->marker.info_length == 16);
-	marker_hdr->marker.requester_port = slave->port_id + 1;
+	marker_hdr->marker.requester_port = member->port_id + 1;
 	marker_hdr->marker.tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
 	marker_hdr->marker.terminator_length = 0;
 }
@@ -1185,7 +1187,7 @@ init_marker(struct rte_mbuf *pkt, struct slave_conf *slave)
 static int
 test_mode4_marker(void)
 {
-	struct slave_conf *slave;
+	struct member_conf *member;
 	struct rte_mbuf *pkts[MAX_PKT_BURST];
 	struct rte_mbuf *marker_pkt;
 	struct marker_header *marker_hdr;
@@ -1196,30 +1198,30 @@ test_mode4_marker(void)
 	uint8_t i, j;
 	const uint16_t ethtype_slow_be = rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
 
-	retval = initialize_bonded_device_with_slaves(TEST_MARKER_SLAVE_COUT,
+	retval = initialize_bonding_device_with_members(TEST_MARKER_MEMBER_COUT,
 			0);
-
TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device"); + TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonding device"); /* Test LACP handshake function */ retval = bond_handshake(); TEST_ASSERT_SUCCESS(retval, "Initial handshake failed"); delay = bond_get_update_timeout_ms(); - FOR_EACH_SLAVE(i, slave) { + FOR_EACH_MEMBER(i, member) { marker_pkt = rte_pktmbuf_alloc(test_params.mbuf_pool); TEST_ASSERT_NOT_NULL(marker_pkt, "Failed to allocate marker packet"); - init_marker(marker_pkt, slave); + init_marker(marker_pkt, member); - retval = slave_put_pkts(slave, &marker_pkt, 1); + retval = member_put_pkts(member, &marker_pkt, 1); if (retval != 1) rte_pktmbuf_free(marker_pkt); TEST_ASSERT_EQUAL(retval, 1, - "Failed to send marker packet to slave %u", slave->port_id); + "Failed to send marker packet to member %u", member->port_id); for (j = 0; j < 20; ++j) { rte_delay_ms(delay); - retval = rte_eth_rx_burst(test_params.bonded_port_id, 0, pkts, + retval = rte_eth_rx_burst(test_params.bonding_port_id, 0, pkts, RTE_DIM(pkts)); if (retval > 0) @@ -1227,19 +1229,19 @@ test_mode4_marker(void) TEST_ASSERT_EQUAL(retval, 0, "Received packets unexpectedly"); - retval = rte_eth_tx_burst(test_params.bonded_port_id, 0, NULL, 0); + retval = rte_eth_tx_burst(test_params.bonding_port_id, 0, NULL, 0); TEST_ASSERT_EQUAL(retval, 0, "Requested TX of 0 packets but %d transmitted", retval); /* Check if LACP packet was send by state machines First and only packet must be a maker response */ - retval = slave_get_pkts(slave, pkts, MAX_PKT_BURST); + retval = member_get_pkts(member, pkts, MAX_PKT_BURST); if (retval == 0) continue; if (retval > 1) free_pkts(pkts, retval); - TEST_ASSERT_EQUAL(retval, 1, "failed to get slave packets"); + TEST_ASSERT_EQUAL(retval, 1, "failed to get member packets"); nb_pkts = retval; marker_hdr = rte_pktmbuf_mtod(pkts[0], struct marker_header *); @@ -1263,7 +1265,7 @@ test_mode4_marker(void) TEST_ASSERT(j < 20, "Marker response not found"); } - retval = remove_slaves_and_stop_bonded_device(); + retval = remove_members_and_stop_bonding_device(); TEST_ASSERT_SUCCESS(retval, "Test cleanup failed."); return TEST_SUCCESS; @@ -1272,7 +1274,7 @@ test_mode4_marker(void) static int test_mode4_expired(void) { - struct slave_conf *slave, *exp_slave = NULL; + struct member_conf *member, *exp_member = NULL; struct rte_mbuf *pkts[MAX_PKT_BURST]; int retval; uint32_t old_delay; @@ -1282,10 +1284,10 @@ test_mode4_expired(void) struct rte_eth_bond_8023ad_conf conf; - retval = initialize_bonded_device_with_slaves(TEST_EXPIRED_SLAVE_COUNT, + retval = initialize_bonding_device_with_members(TEST_EXPIRED_MEMBER_COUNT, 0); /* Set custom timeouts to make test last shorter. */ - rte_eth_bond_8023ad_conf_get(test_params.bonded_port_id, &conf); + rte_eth_bond_8023ad_conf_get(test_params.bonding_port_id, &conf); conf.fast_periodic_ms = 100; conf.slow_periodic_ms = 600; conf.short_timeout_ms = 300; @@ -1294,12 +1296,12 @@ test_mode4_expired(void) conf.tx_period_ms = 100; old_delay = conf.update_timeout_ms; conf.update_timeout_ms = 10; - rte_eth_bond_8023ad_setup(test_params.bonded_port_id, &conf); + rte_eth_bond_8023ad_setup(test_params.bonding_port_id, &conf); /* Wait for new settings to be applied. 
*/ for (i = 0; i < old_delay/conf.update_timeout_ms * 2; i++) { - FOR_EACH_SLAVE(j, slave) - bond_handshake_reply(slave); + FOR_EACH_MEMBER(j, member) + bond_handshake_reply(member); rte_delay_ms(conf.update_timeout_ms); } @@ -1307,13 +1309,13 @@ test_mode4_expired(void) retval = bond_handshake(); TEST_ASSERT_SUCCESS(retval, "Initial handshake failed"); - /* Find first slave */ - FOR_EACH_SLAVE(i, slave) { - exp_slave = slave; + /* Find first member */ + FOR_EACH_MEMBER(i, member) { + exp_member = member; break; } - RTE_VERIFY(exp_slave != NULL); + RTE_VERIFY(exp_member != NULL); /* When one of partners do not send or respond to LACP frame in * conf.long_timeout_ms time, internal state machines should detect this @@ -1325,16 +1327,16 @@ test_mode4_expired(void) TEST_ASSERT_EQUAL(retval, 0, "Unexpectedly received %d packets", retval); - FOR_EACH_SLAVE(i, slave) { - retval = bond_handshake_reply(slave); + FOR_EACH_MEMBER(i, member) { + retval = bond_handshake_reply(member); TEST_ASSERT(retval >= 0, "Handshake failed"); - /* Remove replay for slave that suppose to be expired. */ - if (slave == exp_slave) { - while (rte_ring_count(slave->rx_queue) > 0) { + /* Remove replay for member that suppose to be expired. */ + if (member == exp_member) { + while (rte_ring_count(member->rx_queue) > 0) { void *pkt = NULL; - rte_ring_dequeue(slave->rx_queue, &pkt); + rte_ring_dequeue(member->rx_queue, &pkt); rte_pktmbuf_free(pkt); } } @@ -1348,17 +1350,17 @@ test_mode4_expired(void) retval); } - /* After test only expected slave should be in EXPIRED state */ - FOR_EACH_SLAVE(i, slave) { - if (slave == exp_slave) - TEST_ASSERT(slave->lacp_parnter_state & STATE_EXPIRED, - "Slave %u should be in expired.", slave->port_id); + /* After test only expected member should be in EXPIRED state */ + FOR_EACH_MEMBER(i, member) { + if (member == exp_member) + TEST_ASSERT(member->lacp_parnter_state & STATE_EXPIRED, + "Member %u should be in expired.", member->port_id); else - TEST_ASSERT_EQUAL(bond_handshake_done(slave), 1, - "Slave %u should be operational.", slave->port_id); + TEST_ASSERT_EQUAL(bond_handshake_done(member), 1, + "Member %u should be operational.", member->port_id); } - retval = remove_slaves_and_stop_bonded_device(); + retval = remove_members_and_stop_bonding_device(); TEST_ASSERT_SUCCESS(retval, "Test cleanup failed."); return TEST_SUCCESS; @@ -1368,21 +1370,21 @@ static int test_mode4_ext_ctrl(void) { /* - * configure bonded interface without the external sm enabled + * configure bonding interface without the external sm enabled * . try to transmit lacpdu (should fail) * . try to set collecting and distributing flags (should fail) * reconfigure w/external sm - * . transmit one lacpdu on each slave using new api - * . make sure each slave receives one lacpdu using the callback api - * . transmit one data pdu on each slave (should fail) + * . transmit one lacpdu on each member using new api + * . make sure each member receives one lacpdu using the callback api + * . transmit one data pdu on each member (should fail) * . 
enable distribution and collection, send one data pdu each again */ int retval; - struct slave_conf *slave = NULL; + struct member_conf *member = NULL; uint8_t i; - struct rte_mbuf *lacp_tx_buf[SLAVE_COUNT]; + struct rte_mbuf *lacp_tx_buf[MEMBER_COUNT]; struct rte_ether_addr src_mac, dst_mac; struct lacpdu_header lacpdu = { .lacpdu = { @@ -1396,31 +1398,31 @@ test_mode4_ext_ctrl(void) initialize_eth_header(&lacpdu.eth_hdr, &src_mac, &dst_mac, RTE_ETHER_TYPE_SLOW, 0, 0); - for (i = 0; i < SLAVE_COUNT; i++) { + for (i = 0; i < MEMBER_COUNT; i++) { lacp_tx_buf[i] = rte_pktmbuf_alloc(test_params.mbuf_pool); rte_memcpy(rte_pktmbuf_mtod(lacp_tx_buf[i], char *), &lacpdu, sizeof(lacpdu)); rte_pktmbuf_pkt_len(lacp_tx_buf[i]) = sizeof(lacpdu); } - retval = initialize_bonded_device_with_slaves(TEST_TX_SLAVE_COUNT, 0); - TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device"); + retval = initialize_bonding_device_with_members(TEST_TX_MEMBER_COUNT, 0); + TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonding device"); - FOR_EACH_SLAVE(i, slave) { + FOR_EACH_MEMBER(i, member) { TEST_ASSERT_FAIL(rte_eth_bond_8023ad_ext_slowtx( - test_params.bonded_port_id, - slave->port_id, lacp_tx_buf[i]), - "Slave should not allow manual LACP xmit"); + test_params.bonding_port_id, + member->port_id, lacp_tx_buf[i]), + "Member should not allow manual LACP xmit"); TEST_ASSERT_FAIL(rte_eth_bond_8023ad_ext_collect( - test_params.bonded_port_id, - slave->port_id, 1), - "Slave should not allow external state controls"); + test_params.bonding_port_id, + member->port_id, 1), + "Member should not allow external state controls"); } free_pkts(lacp_tx_buf, RTE_DIM(lacp_tx_buf)); - retval = remove_slaves_and_stop_bonded_device(); - TEST_ASSERT_SUCCESS(retval, "Bonded device cleanup failed."); + retval = remove_members_and_stop_bonding_device(); + TEST_ASSERT_SUCCESS(retval, "Bonding device cleanup failed."); return TEST_SUCCESS; } @@ -1430,13 +1432,13 @@ static int test_mode4_ext_lacp(void) { int retval; - struct slave_conf *slave = NULL; - uint8_t all_slaves_done = 0, i; + struct member_conf *member = NULL; + uint8_t all_members_done = 0, i; uint16_t nb_pkts; const unsigned int delay = bond_get_update_timeout_ms(); - struct rte_mbuf *lacp_tx_buf[SLAVE_COUNT]; - struct rte_mbuf *buf[SLAVE_COUNT]; + struct rte_mbuf *lacp_tx_buf[MEMBER_COUNT]; + struct rte_mbuf *buf[MEMBER_COUNT]; struct rte_ether_addr src_mac, dst_mac; struct lacpdu_header lacpdu = { .lacpdu = { @@ -1450,15 +1452,15 @@ test_mode4_ext_lacp(void) initialize_eth_header(&lacpdu.eth_hdr, &src_mac, &dst_mac, RTE_ETHER_TYPE_SLOW, 0, 0); - for (i = 0; i < SLAVE_COUNT; i++) { + for (i = 0; i < MEMBER_COUNT; i++) { lacp_tx_buf[i] = rte_pktmbuf_alloc(test_params.mbuf_pool); rte_memcpy(rte_pktmbuf_mtod(lacp_tx_buf[i], char *), &lacpdu, sizeof(lacpdu)); rte_pktmbuf_pkt_len(lacp_tx_buf[i]) = sizeof(lacpdu); } - retval = initialize_bonded_device_with_slaves(TEST_TX_SLAVE_COUNT, 1); - TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device"); + retval = initialize_bonding_device_with_members(TEST_TX_MEMBER_COUNT, 1); + TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonding device"); memset(lacpdu_rx_count, 0, sizeof(lacpdu_rx_count)); @@ -1466,22 +1468,22 @@ test_mode4_ext_lacp(void) for (i = 0; i < 30; ++i) rte_delay_ms(delay); - FOR_EACH_SLAVE(i, slave) { + FOR_EACH_MEMBER(i, member) { retval = rte_eth_bond_8023ad_ext_slowtx( - test_params.bonded_port_id, - slave->port_id, lacp_tx_buf[i]); + test_params.bonding_port_id, + member->port_id, lacp_tx_buf[i]); 
TEST_ASSERT_SUCCESS(retval, - "Slave should allow manual LACP xmit"); + "Member should allow manual LACP xmit"); } nb_pkts = bond_tx(NULL, 0); TEST_ASSERT_EQUAL(nb_pkts, 0, "Packets transmitted unexpectedly"); - FOR_EACH_SLAVE(i, slave) { - nb_pkts = slave_get_pkts(slave, buf, RTE_DIM(buf)); - TEST_ASSERT_EQUAL(nb_pkts, 1, "found %u packets on slave %d\n", + FOR_EACH_MEMBER(i, member) { + nb_pkts = member_get_pkts(member, buf, RTE_DIM(buf)); + TEST_ASSERT_EQUAL(nb_pkts, 1, "found %u packets on member %d\n", nb_pkts, i); - slave_put_pkts(slave, buf, nb_pkts); + member_put_pkts(member, buf, nb_pkts); } nb_pkts = bond_rx(buf, RTE_DIM(buf)); @@ -1489,26 +1491,26 @@ test_mode4_ext_lacp(void) TEST_ASSERT_EQUAL(nb_pkts, 0, "Packets received unexpectedly"); /* wait for the periodic callback to run */ - for (i = 0; i < 30 && all_slaves_done == 0; ++i) { + for (i = 0; i < 30 && all_members_done == 0; ++i) { uint8_t s, total = 0; rte_delay_ms(delay); - FOR_EACH_SLAVE(s, slave) { - total += lacpdu_rx_count[slave->port_id]; + FOR_EACH_MEMBER(s, member) { + total += lacpdu_rx_count[member->port_id]; } - if (total >= SLAVE_COUNT) - all_slaves_done = 1; + if (total >= MEMBER_COUNT) + all_members_done = 1; } - FOR_EACH_SLAVE(i, slave) { - TEST_ASSERT_EQUAL(lacpdu_rx_count[slave->port_id], 1, - "Slave port %u should have received 1 lacpdu (count=%u)", - slave->port_id, - lacpdu_rx_count[slave->port_id]); + FOR_EACH_MEMBER(i, member) { + TEST_ASSERT_EQUAL(lacpdu_rx_count[member->port_id], 1, + "Member port %u should have received 1 lacpdu (count=%u)", + member->port_id, + lacpdu_rx_count[member->port_id]); } - retval = remove_slaves_and_stop_bonded_device(); + retval = remove_members_and_stop_bonding_device(); TEST_ASSERT_SUCCESS(retval, "Test cleanup failed."); return TEST_SUCCESS; @@ -1517,10 +1519,10 @@ test_mode4_ext_lacp(void) static int check_environment(void) { - struct slave_conf *port; + struct member_conf *port; uint8_t i, env_state; - uint16_t slaves[RTE_DIM(test_params.slave_ports)]; - int slaves_count; + uint16_t members[RTE_DIM(test_params.member_ports)]; + int members_count; env_state = 0; FOR_EACH_PORT(i, port) { @@ -1530,7 +1532,7 @@ check_environment(void) if (rte_ring_count(port->tx_queue) != 0) env_state |= 0x02; - if (port->bonded != 0) + if (port->bonding != 0) env_state |= 0x04; if (port->lacp_parnter_state != 0) @@ -1540,20 +1542,20 @@ check_environment(void) break; } - slaves_count = rte_eth_bond_slaves_get(test_params.bonded_port_id, - slaves, RTE_DIM(slaves)); + members_count = rte_eth_bond_members_get(test_params.bonding_port_id, + members, RTE_DIM(members)); - if (slaves_count != 0) + if (members_count != 0) env_state |= 0x10; TEST_ASSERT_EQUAL(env_state, 0, "Environment not clean (port %u):%s%s%s%s%s", port->port_id, - env_state & 0x01 ? " slave rx queue not clean" : "", - env_state & 0x02 ? " slave tx queue not clean" : "", - env_state & 0x04 ? " port marked as enslaved" : "", - env_state & 0x80 ? " slave state is not reset" : "", - env_state & 0x10 ? " slave count not equal 0" : "."); + env_state & 0x01 ? " member rx queue not clean" : "", + env_state & 0x02 ? " member tx queue not clean" : "", + env_state & 0x04 ? " port marked as a member" : "", + env_state & 0x80 ? " member state is not reset" : "", + env_state & 0x10 ? 
" member count not equal 0" : "."); return TEST_SUCCESS; @@ -1562,7 +1564,7 @@ check_environment(void) static int test_mode4_executor(int (*test_func)(void)) { - struct slave_conf *port; + struct member_conf *port; int test_result; uint8_t i; void *pkt; @@ -1581,8 +1583,8 @@ test_mode4_executor(int (*test_func)(void)) /* Reset environment in case test failed to do that. */ if (test_result != TEST_SUCCESS) { - TEST_ASSERT_SUCCESS(remove_slaves_and_stop_bonded_device(), - "Failed to stop bonded device"); + TEST_ASSERT_SUCCESS(remove_members_and_stop_bonding_device(), + "Failed to stop bonding device"); FOR_EACH_PORT(i, port) { while (rte_ring_count(port->rx_queue) != 0) { @@ -1674,4 +1676,4 @@ test_link_bonding_mode4(void) return unit_test_suite_runner(&link_bonding_mode4_test_suite); } -REGISTER_TEST_COMMAND(link_bonding_mode4_autotest, test_link_bonding_mode4); +REGISTER_DRIVER_TEST(link_bonding_mode4_autotest, test_link_bonding_mode4); diff --git a/app/test/test_link_bonding_rssconf.c b/app/test/test_link_bonding_rssconf.c index 464fb2dbd0b..cd94e9e5dce 100644 --- a/app/test/test_link_bonding_rssconf.c +++ b/app/test/test_link_bonding_rssconf.c @@ -27,15 +27,15 @@ #include "test.h" -#define SLAVE_COUNT (4) +#define MEMBER_COUNT (4) #define RXTX_RING_SIZE 1024 #define RXTX_QUEUE_COUNT 4 -#define BONDED_DEV_NAME ("net_bonding_rss") +#define BONDING_DEV_NAME ("net_bonding_rss") -#define SLAVE_DEV_NAME_FMT ("net_null%d") -#define SLAVE_RXTX_QUEUE_FMT ("rssconf_slave%d_q%d") +#define MEMBER_DEV_NAME_FMT ("net_null%d") +#define MEMBER_RXTX_QUEUE_FMT ("rssconf_member%d_q%d") #define NUM_MBUFS 8191 #define MBUF_SIZE (1600 + RTE_PKTMBUF_HEADROOM) @@ -46,7 +46,7 @@ #define INVALID_PORT_ID (0xFF) #define INVALID_BONDING_MODE (-1) -struct slave_conf { +struct member_conf { uint16_t port_id; struct rte_eth_dev_info dev_info; @@ -54,7 +54,7 @@ struct slave_conf { uint8_t rss_key[40]; struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE]; - uint8_t is_slave; + uint8_t is_member; struct rte_ring *rxtx_queue[RXTX_QUEUE_COUNT]; }; @@ -62,15 +62,15 @@ struct link_bonding_rssconf_unittest_params { uint8_t bond_port_id; struct rte_eth_dev_info bond_dev_info; struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE]; - struct slave_conf slave_ports[SLAVE_COUNT]; + struct member_conf member_ports[MEMBER_COUNT]; struct rte_mempool *mbuf_pool; }; static struct link_bonding_rssconf_unittest_params test_params = { .bond_port_id = INVALID_PORT_ID, - .slave_ports = { - [0 ... SLAVE_COUNT - 1] = { .port_id = INVALID_PORT_ID, .is_slave = 0} + .member_ports = { + [0 ... MEMBER_COUNT - 1] = { .port_id = INVALID_PORT_ID, .is_member = 0} }, .mbuf_pool = NULL, }; @@ -107,14 +107,14 @@ static struct rte_eth_conf rss_pmd_conf = { #define FOR_EACH(_i, _item, _array, _size) \ for (_i = 0, _item = &_array[0]; _i < _size && (_item = &_array[_i]); _i++) -/* Macro for iterating over every port that can be used as a slave +/* Macro for iterating over every port that can be used as a member * in this test. 
- * _i variable used as an index in test_params->slave_ports - * _slave pointer to &test_params->slave_ports[_idx] + * _i variable used as an index in test_params->member_ports + * _member pointer to &test_params->member_ports[_idx] */ #define FOR_EACH_PORT(_i, _port) \ - FOR_EACH(_i, _port, test_params.slave_ports, \ - RTE_DIM(test_params.slave_ports)) + FOR_EACH(_i, _port, test_params.member_ports, \ + RTE_DIM(test_params.member_ports)) static int configure_ethdev(uint16_t port_id, struct rte_eth_conf *eth_conf, @@ -151,21 +151,21 @@ configure_ethdev(uint16_t port_id, struct rte_eth_conf *eth_conf, } /** - * Remove all slaves from bonding + * Remove all members from bonding */ static int -remove_slaves(void) +remove_members(void) { unsigned n; - struct slave_conf *port; + struct member_conf *port; FOR_EACH_PORT(n, port) { - port = &test_params.slave_ports[n]; - if (port->is_slave) { - TEST_ASSERT_SUCCESS(rte_eth_bond_slave_remove( + port = &test_params.member_ports[n]; + if (port->is_member) { + TEST_ASSERT_SUCCESS(rte_eth_bond_member_remove( test_params.bond_port_id, port->port_id), - "Cannot remove slave %d from bonding", port->port_id); - port->is_slave = 0; + "Cannot remove member %d from bonding", port->port_id); + port->is_member = 0; } } @@ -173,30 +173,30 @@ remove_slaves(void) } static int -remove_slaves_and_stop_bonded_device(void) +remove_members_and_stop_bonding_device(void) { - TEST_ASSERT_SUCCESS(remove_slaves(), "Removing slaves"); + TEST_ASSERT_SUCCESS(remove_members(), "Removing members"); TEST_ASSERT_SUCCESS(rte_eth_dev_stop(test_params.bond_port_id), "Failed to stop port %u", test_params.bond_port_id); return TEST_SUCCESS; } /** - * Add all slaves to bonding + * Add all members to bonding */ static int -bond_slaves(void) +bond_members(void) { unsigned n; - struct slave_conf *port; + struct member_conf *port; FOR_EACH_PORT(n, port) { - port = &test_params.slave_ports[n]; - if (!port->is_slave) { - TEST_ASSERT_SUCCESS(rte_eth_bond_slave_add(test_params.bond_port_id, - port->port_id), "Cannot attach slave %d to the bonding", + port = &test_params.member_ports[n]; + if (!port->is_member) { + TEST_ASSERT_SUCCESS(rte_eth_bond_member_add(test_params.bond_port_id, + port->port_id), "Cannot attach member %d to the bonding", port->port_id); - port->is_slave = 1; + port->is_member = 1; } } @@ -223,11 +223,11 @@ reta_set(uint16_t port_id, uint8_t value, int reta_size) } /** - * Check if slaves RETA is synchronized with bonding port. Returns 1 if slave + * Check if members RETA is synchronized with bonding port. Returns 1 if member * port is synced with bonding port. */ static int -reta_check_synced(struct slave_conf *port) +reta_check_synced(struct member_conf *port) { unsigned i; @@ -264,10 +264,10 @@ bond_reta_fetch(void) { } /** - * Fetch slaves RETA + * Fetch members RETA */ static int -slave_reta_fetch(struct slave_conf *port) { +member_reta_fetch(struct member_conf *port) { unsigned j; for (j = 0; j < port->dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE; j++) @@ -280,49 +280,49 @@ slave_reta_fetch(struct slave_conf *port) { } /** - * Remove and add slave to check if slaves configuration is synced with - * the bonding ports values after adding new slave. + * Remove and add member to check if members configuration is synced with + * the bonding ports values after adding new member. */ static int -slave_remove_and_add(void) +member_remove_and_add(void) { - struct slave_conf *port = &(test_params.slave_ports[0]); + struct member_conf *port = &(test_params.member_ports[0]); - /* 1. 
Remove first slave from bonding */ - TEST_ASSERT_SUCCESS(rte_eth_bond_slave_remove(test_params.bond_port_id, - port->port_id), "Cannot remove slave #d from bonding"); + /* 1. Remove first member from bonding */ + TEST_ASSERT_SUCCESS(rte_eth_bond_member_remove(test_params.bond_port_id, + port->port_id), "Cannot remove member %d from bonding"); - /* 2. Change removed (ex-)slave and bonding configuration to different + /* 2. Change removed (ex-)member and bonding configuration to different * values */ reta_set(test_params.bond_port_id, 1, test_params.bond_dev_info.reta_size); bond_reta_fetch(); reta_set(port->port_id, 2, port->dev_info.reta_size); - slave_reta_fetch(port); + member_reta_fetch(port); TEST_ASSERT(reta_check_synced(port) == 0, - "Removed slave didn't should be synchronized with bonding port"); + "Removed member should not be synchronized with bonding port"); - /* 3. Add (ex-)slave and check if configuration changed*/ - TEST_ASSERT_SUCCESS(rte_eth_bond_slave_add(test_params.bond_port_id, - port->port_id), "Cannot add slave"); + /* 3. Add (ex-)member and check if configuration changed*/ + TEST_ASSERT_SUCCESS(rte_eth_bond_member_add(test_params.bond_port_id, + port->port_id), "Cannot add member"); bond_reta_fetch(); - slave_reta_fetch(port); + member_reta_fetch(port); return reta_check_synced(port); } /** - * Test configuration propagation over slaves. + * Test configuration propagation over members. */ static int test_propagate(void) { unsigned i; uint8_t n; - struct slave_conf *port; + struct member_conf *port; uint8_t bond_rss_key[40]; struct rte_eth_rss_conf bond_rss_conf; @@ -349,18 +349,18 @@ test_propagate(void) retval = rte_eth_dev_rss_hash_update(test_params.bond_port_id, &bond_rss_conf); - TEST_ASSERT_SUCCESS(retval, "Cannot set slaves hash function"); + TEST_ASSERT_SUCCESS(retval, "Cannot set members hash function"); FOR_EACH_PORT(n, port) { - port = &test_params.slave_ports[n]; + port = &test_params.member_ports[n]; retval = rte_eth_dev_rss_hash_conf_get(port->port_id, &port->rss_conf); TEST_ASSERT_SUCCESS(retval, - "Cannot take slaves RSS configuration"); + "Cannot take members RSS configuration"); TEST_ASSERT(port->rss_conf.rss_hf == rss_hf, - "Hash function not propagated for slave %d", + "Hash function not propagated for member %d", port->port_id); } @@ -376,11 +376,11 @@ test_propagate(void) /* Set all keys to zero */ FOR_EACH_PORT(n, port) { - port = &test_params.slave_ports[n]; + port = &test_params.member_ports[n]; memset(port->rss_conf.rss_key, 0, 40); retval = rte_eth_dev_rss_hash_update(port->port_id, &port->rss_conf); - TEST_ASSERT_SUCCESS(retval, "Cannot set slaves RSS keys"); + TEST_ASSERT_SUCCESS(retval, "Cannot set members RSS keys"); } memset(bond_rss_key, i, sizeof(bond_rss_key)); @@ -390,21 +390,21 @@ test_propagate(void) retval = rte_eth_dev_rss_hash_update(test_params.bond_port_id, &bond_rss_conf); - TEST_ASSERT_SUCCESS(retval, "Cannot set bonded port RSS keys"); + TEST_ASSERT_SUCCESS(retval, "Cannot set bonding port RSS keys"); FOR_EACH_PORT(n, port) { - port = &test_params.slave_ports[n]; + port = &test_params.member_ports[n]; retval = rte_eth_dev_rss_hash_conf_get(port->port_id, &(port->rss_conf)); TEST_ASSERT_SUCCESS(retval, - "Cannot take slaves RSS configuration"); + "Cannot take members RSS configuration"); /* compare keys */ retval = memcmp(port->rss_conf.rss_key, bond_rss_key, sizeof(bond_rss_key)); - TEST_ASSERT(retval == 0, "Key value not propagated for slave %d", + TEST_ASSERT(retval == 0, "Key value not propagated for member %d", 
port->port_id); } } @@ -416,22 +416,22 @@ test_propagate(void) /* Set all keys to zero */ FOR_EACH_PORT(n, port) { - port = &test_params.slave_ports[n]; + port = &test_params.member_ports[n]; retval = reta_set(port->port_id, (i + 1) % RXTX_QUEUE_COUNT, port->dev_info.reta_size); - TEST_ASSERT_SUCCESS(retval, "Cannot set slaves RETA"); + TEST_ASSERT_SUCCESS(retval, "Cannot set members RETA"); } TEST_ASSERT_SUCCESS(reta_set(test_params.bond_port_id, i % RXTX_QUEUE_COUNT, test_params.bond_dev_info.reta_size), - "Cannot set bonded port RETA"); + "Cannot set bonding port RETA"); bond_reta_fetch(); FOR_EACH_PORT(n, port) { - port = &test_params.slave_ports[n]; + port = &test_params.member_ports[n]; - slave_reta_fetch(port); + member_reta_fetch(port); TEST_ASSERT(reta_check_synced(port) == 1, "RETAs inconsistent"); } } @@ -459,29 +459,29 @@ test_rss(void) "Error during getting device (port %u) info: %s\n", test_params.bond_port_id, strerror(-ret)); - TEST_ASSERT_SUCCESS(bond_slaves(), "Bonding slaves failed"); + TEST_ASSERT_SUCCESS(bond_members(), "Bonding members failed"); TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params.bond_port_id), "Failed to start bonding port (%d).", test_params.bond_port_id); TEST_ASSERT_SUCCESS(test_propagate(), "Propagation test failed"); - TEST_ASSERT(slave_remove_and_add() == 1, "remove and add slaves success."); + TEST_ASSERT(member_remove_and_add() == 1, "remove and add members success."); - remove_slaves_and_stop_bonded_device(); + remove_members_and_stop_bonding_device(); return TEST_SUCCESS; } /** - * Test RSS configuration over bonded and slaves. + * Test RSS configuration over bonding and members. */ static int test_rss_config_lazy(void) { struct rte_eth_rss_conf bond_rss_conf = {0}; - struct slave_conf *port; + struct member_conf *port; uint8_t rss_key[40]; uint64_t rss_hf; int retval; @@ -499,24 +499,24 @@ test_rss_config_lazy(void) bond_rss_conf.rss_hf = rss_hf; retval = rte_eth_dev_rss_hash_update(test_params.bond_port_id, &bond_rss_conf); - TEST_ASSERT(retval != 0, "Succeeded in setting bonded port hash function"); + TEST_ASSERT(retval != 0, "Succeeded in setting bonding port hash function"); } - /* Set all keys to zero for all slaves */ + /* Set all keys to zero for all members */ FOR_EACH_PORT(n, port) { - port = &test_params.slave_ports[n]; + port = &test_params.member_ports[n]; retval = rte_eth_dev_rss_hash_conf_get(port->port_id, &port->rss_conf); - TEST_ASSERT_SUCCESS(retval, "Cannot get slaves RSS configuration"); + TEST_ASSERT_SUCCESS(retval, "Cannot get members RSS configuration"); memset(port->rss_key, 0, sizeof(port->rss_key)); port->rss_conf.rss_key = port->rss_key; port->rss_conf.rss_key_len = sizeof(port->rss_key); retval = rte_eth_dev_rss_hash_update(port->port_id, &port->rss_conf); - TEST_ASSERT(retval != 0, "Succeeded in setting slaves RSS keys"); + TEST_ASSERT(retval != 0, "Succeeded in setting members RSS keys"); } - /* Set RSS keys for bonded port */ + /* Set RSS keys for bonding port */ memset(rss_key, 1, sizeof(rss_key)); bond_rss_conf.rss_hf = rss_hf; bond_rss_conf.rss_key = rss_key; @@ -524,20 +524,20 @@ test_rss_config_lazy(void) retval = rte_eth_dev_rss_hash_update(test_params.bond_port_id, &bond_rss_conf); - TEST_ASSERT(retval != 0, "Succeeded in setting bonded port RSS keys"); + TEST_ASSERT(retval != 0, "Succeeded in setting bonding port RSS keys"); /* Test RETA propagation */ for (i = 0; i < RXTX_QUEUE_COUNT; i++) { FOR_EACH_PORT(n, port) { - port = &test_params.slave_ports[n]; + port = &test_params.member_ports[n]; retval = 
reta_set(port->port_id, (i + 1) % RXTX_QUEUE_COUNT, port->dev_info.reta_size); - TEST_ASSERT(retval != 0, "Succeeded in setting slaves RETA"); + TEST_ASSERT(retval != 0, "Succeeded in setting members RETA"); } retval = reta_set(test_params.bond_port_id, i % RXTX_QUEUE_COUNT, test_params.bond_dev_info.reta_size); - TEST_ASSERT(retval != 0, "Succeeded in setting bonded port RETA"); + TEST_ASSERT(retval != 0, "Succeeded in setting bonding port RETA"); } return TEST_SUCCESS; @@ -560,14 +560,14 @@ test_rss_lazy(void) "Error during getting device (port %u) info: %s\n", test_params.bond_port_id, strerror(-ret)); - TEST_ASSERT_SUCCESS(bond_slaves(), "Bonding slaves failed"); + TEST_ASSERT_SUCCESS(bond_members(), "Bonding members failed"); TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params.bond_port_id), "Failed to start bonding port (%d).", test_params.bond_port_id); TEST_ASSERT_SUCCESS(test_rss_config_lazy(), "Succeeded in setting RSS hash when RX_RSS mq_mode is turned off"); - remove_slaves_and_stop_bonded_device(); + remove_members_and_stop_bonding_device(); return TEST_SUCCESS; } @@ -579,13 +579,13 @@ test_setup(void) int retval; int port_id; char name[256]; - struct slave_conf *port; + struct member_conf *port; struct rte_ether_addr mac_addr = { .addr_bytes = {0} }; if (test_params.mbuf_pool == NULL) { test_params.mbuf_pool = rte_pktmbuf_pool_create( - "RSS_MBUF_POOL", NUM_MBUFS * SLAVE_COUNT, + "RSS_MBUF_POOL", NUM_MBUFS * MEMBER_COUNT, MBUF_CACHE_SIZE, 0, MBUF_SIZE, rte_socket_id()); TEST_ASSERT(test_params.mbuf_pool != NULL, @@ -594,10 +594,10 @@ test_setup(void) /* Create / initialize ring eth devs. */ FOR_EACH_PORT(n, port) { - port = &test_params.slave_ports[n]; + port = &test_params.member_ports[n]; port_id = rte_eth_dev_count_avail(); - snprintf(name, sizeof(name), SLAVE_DEV_NAME_FMT, port_id); + snprintf(name, sizeof(name), MEMBER_DEV_NAME_FMT, port_id); retval = rte_vdev_init(name, "size=64,copy=0"); TEST_ASSERT_SUCCESS(retval, "Failed to create null device '%s'\n", @@ -624,10 +624,10 @@ test_setup(void) } if (test_params.bond_port_id == INVALID_PORT_ID) { - retval = rte_eth_bond_create(BONDED_DEV_NAME, 0, rte_socket_id()); + retval = rte_eth_bond_create(BONDING_DEV_NAME, 0, rte_socket_id()); - TEST_ASSERT(retval >= 0, "Failed to create bonded ethdev %s", - BONDED_DEV_NAME); + TEST_ASSERT(retval >= 0, "Failed to create bonding ethdev %s", + BONDING_DEV_NAME); test_params.bond_port_id = retval; @@ -647,7 +647,7 @@ test_setup(void) static void testsuite_teardown(void) { - struct slave_conf *port; + struct member_conf *port; uint8_t i; /* Only stop ports. @@ -685,8 +685,8 @@ test_rssconf_executor(int (*test_func)(void)) /* Reset environment in case test failed to do that. 
*/ if (test_result != TEST_SUCCESS) { - TEST_ASSERT_SUCCESS(remove_slaves_and_stop_bonded_device(), - "Failed to stop bonded device"); + TEST_ASSERT_SUCCESS(remove_members_and_stop_bonding_device(), + "Failed to stop bonding device"); } return test_result; @@ -728,4 +728,4 @@ test_link_bonding_rssconf(void) return unit_test_suite_runner(&link_bonding_rssconf_test_suite); } -REGISTER_TEST_COMMAND(link_bonding_rssconf_autotest, test_link_bonding_rssconf); +REGISTER_DRIVER_TEST(link_bonding_rssconf_autotest, test_link_bonding_rssconf); diff --git a/app/test/test_logs.c b/app/test/test_logs.c index 8da8824bee6..43b09704a3f 100644 --- a/app/test/test_logs.c +++ b/app/test/test_logs.c @@ -158,4 +158,4 @@ test_logs(void) return 0; } -REGISTER_TEST_COMMAND(logs_autotest, test_logs); +REGISTER_FAST_TEST(logs_autotest, true, true, test_logs); diff --git a/app/test/test_lpm.c b/app/test/test_lpm.c index 37b460af3a9..b93e11d7003 100644 --- a/app/test/test_lpm.c +++ b/app/test/test_lpm.c @@ -1584,4 +1584,4 @@ test_lpm(void) return global_status; } -REGISTER_TEST_COMMAND(lpm_autotest, test_lpm); +REGISTER_FAST_TEST(lpm_autotest, true, true, test_lpm); diff --git a/app/test/test_lpm6.c b/app/test/test_lpm6.c index b6b6f8615ea..1d8a0afa115 100644 --- a/app/test/test_lpm6.c +++ b/app/test/test_lpm6.c @@ -1793,4 +1793,4 @@ test_lpm6(void) return global_status; } -REGISTER_TEST_COMMAND(lpm6_autotest, test_lpm6); +REGISTER_FAST_TEST(lpm6_autotest, true, true, test_lpm6); diff --git a/app/test/test_lpm6_perf.c b/app/test/test_lpm6_perf.c index 5b684686a68..8a49f74c84e 100644 --- a/app/test/test_lpm6_perf.c +++ b/app/test/test_lpm6_perf.c @@ -161,4 +161,4 @@ test_lpm6_perf(void) return 0; } -REGISTER_TEST_COMMAND(lpm6_perf_autotest, test_lpm6_perf); +REGISTER_PERF_TEST(lpm6_perf_autotest, test_lpm6_perf); diff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c index e72437ba385..15ff396dd0c 100644 --- a/app/test/test_lpm_perf.c +++ b/app/test/test_lpm_perf.c @@ -760,4 +760,4 @@ test_lpm_perf(void) return 0; } -REGISTER_TEST_COMMAND(lpm_perf_autotest, test_lpm_perf); +REGISTER_PERF_TEST(lpm_perf_autotest, test_lpm_perf); diff --git a/app/test/test_malloc.c b/app/test/test_malloc.c index ff081dd9313..cd579c503cf 100644 --- a/app/test/test_malloc.c +++ b/app/test/test_malloc.c @@ -1091,4 +1091,4 @@ test_malloc(void) return 0; } -REGISTER_TEST_COMMAND(malloc_autotest, test_malloc); +REGISTER_FAST_TEST(malloc_autotest, false, true, test_malloc); diff --git a/app/test/test_malloc_perf.c b/app/test/test_malloc_perf.c index 9bd16629815..a99bfd85315 100644 --- a/app/test/test_malloc_perf.c +++ b/app/test/test_malloc_perf.c @@ -171,4 +171,4 @@ test_malloc_perf(void) return 0; } -REGISTER_TEST_COMMAND(malloc_perf_autotest, test_malloc_perf); +REGISTER_PERF_TEST(malloc_perf_autotest, test_malloc_perf); diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c index efac01806be..d7393df7eb5 100644 --- a/app/test/test_mbuf.c +++ b/app/test/test_mbuf.c @@ -2955,4 +2955,4 @@ test_mbuf(void) } #undef GOTO_FAIL -REGISTER_TEST_COMMAND(mbuf_autotest, test_mbuf); +REGISTER_FAST_TEST(mbuf_autotest, false, true, test_mbuf); diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c index 52e45e7e2a7..46ff13cc00a 100644 --- a/app/test/test_mcslock.c +++ b/app/test/test_mcslock.c @@ -36,9 +36,9 @@ * lock multiple times. 
*/ -rte_mcslock_t *p_ml; -rte_mcslock_t *p_ml_try; -rte_mcslock_t *p_ml_perf; +RTE_ATOMIC(rte_mcslock_t *) p_ml; +RTE_ATOMIC(rte_mcslock_t *) p_ml_try; +RTE_ATOMIC(rte_mcslock_t *) p_ml_perf; static unsigned int count; @@ -241,4 +241,4 @@ test_mcslock(void) return ret; } -REGISTER_TEST_COMMAND(mcslock_autotest, test_mcslock); +REGISTER_FAST_TEST(mcslock_autotest, false, true, test_mcslock); diff --git a/app/test/test_member.c b/app/test/test_member.c index 4a93f8bff47..5a4d2750db3 100644 --- a/app/test/test_member.c +++ b/app/test/test_member.c @@ -996,4 +996,4 @@ test_member(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(member_autotest, test_member); +REGISTER_FAST_TEST(member_autotest, true, true, test_member); diff --git a/app/test/test_member_perf.c b/app/test/test_member_perf.c index 2f79888fbde..db6b8a18efd 100644 --- a/app/test/test_member_perf.c +++ b/app/test/test_member_perf.c @@ -780,4 +780,4 @@ test_member_perf(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(member_perf_autotest, test_member_perf); +REGISTER_PERF_TEST(member_perf_autotest, test_member_perf); diff --git a/app/test/test_memcpy.c b/app/test/test_memcpy.c index 1ab86f4967c..802dc4631bd 100644 --- a/app/test/test_memcpy.c +++ b/app/test/test_memcpy.c @@ -129,4 +129,4 @@ test_memcpy(void) return 0; } -REGISTER_TEST_COMMAND(memcpy_autotest, test_memcpy); +REGISTER_FAST_TEST(memcpy_autotest, true, true, test_memcpy); diff --git a/app/test/test_memcpy_perf.c b/app/test/test_memcpy_perf.c index 3727c160e6b..5c05a84619e 100644 --- a/app/test/test_memcpy_perf.c +++ b/app/test/test_memcpy_perf.c @@ -348,4 +348,4 @@ test_memcpy_perf(void) return 0; } -REGISTER_TEST_COMMAND(memcpy_perf_autotest, test_memcpy_perf); +REGISTER_PERF_TEST(memcpy_perf_autotest, test_memcpy_perf); diff --git a/app/test/test_memory.c b/app/test/test_memory.c index 440e5ef8387..ea37f62338e 100644 --- a/app/test/test_memory.c +++ b/app/test/test_memory.c @@ -110,4 +110,4 @@ test_memory(void) return 0; } -REGISTER_TEST_COMMAND(memory_autotest, test_memory); +REGISTER_FAST_TEST(memory_autotest, false, true, test_memory); diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c index 8e493eda47f..ad7ebd63633 100644 --- a/app/test/test_mempool.c +++ b/app/test/test_mempool.c @@ -1043,4 +1043,4 @@ test_mempool(void) return ret; } -REGISTER_TEST_COMMAND(mempool_autotest, test_mempool); +REGISTER_FAST_TEST(mempool_autotest, false, true, test_mempool); diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c index ce7c6241ab0..96de347f044 100644 --- a/app/test/test_mempool_perf.c +++ b/app/test/test_mempool_perf.c @@ -437,4 +437,4 @@ test_mempool_perf(void) return ret; } -REGISTER_TEST_COMMAND(mempool_perf_autotest, test_mempool_perf); +REGISTER_PERF_TEST(mempool_perf_autotest, test_mempool_perf); diff --git a/app/test/test_memzone.c b/app/test/test_memzone.c index f10f4fd9cd3..37ae7afc95e 100644 --- a/app/test/test_memzone.c +++ b/app/test/test_memzone.c @@ -1163,4 +1163,4 @@ test_memzone(void) return 0; } -REGISTER_TEST_COMMAND(memzone_autotest, test_memzone); +REGISTER_FAST_TEST(memzone_autotest, false, true, test_memzone); diff --git a/app/test/test_meter.c b/app/test/test_meter.c index 15d5a4839b4..6241b75ba0c 100644 --- a/app/test/test_meter.c +++ b/app/test/test_meter.c @@ -713,4 +713,4 @@ test_meter(void) } -REGISTER_TEST_COMMAND(meter_autotest, test_meter); +REGISTER_FAST_TEST(meter_autotest, true, true, test_meter); diff --git a/app/test/test_metrics.c b/app/test/test_metrics.c index 
11222133d03..917bee2e37f 100644 --- a/app/test/test_metrics.c +++ b/app/test/test_metrics.c @@ -326,4 +326,4 @@ test_metrics(void) return unit_test_suite_runner(&metrics_testsuite); } -REGISTER_TEST_COMMAND(metrics_autotest, test_metrics); +REGISTER_FAST_TEST(metrics_autotest, true, true, test_metrics); diff --git a/app/test/test_mp_secondary.c b/app/test/test_mp_secondary.c index ad47d578f22..f3694530a86 100644 --- a/app/test/test_mp_secondary.c +++ b/app/test/test_mp_secondary.c @@ -223,4 +223,4 @@ test_mp_secondary(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(multiprocess_autotest, test_mp_secondary); +REGISTER_FAST_TEST(multiprocess_autotest, false, false, test_mp_secondary); diff --git a/app/test/test_net_ether.c b/app/test/test_net_ether.c new file mode 100644 index 00000000000..1cb6845a9c4 --- /dev/null +++ b/app/test/test_net_ether.c @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2023 Stephen Hemminger + */ + +#include + +#include +#include "test.h" + +#define N 1000000 + +static const struct rte_ether_addr zero_ea; +static const struct rte_ether_addr bcast_ea = { + .addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, +}; + +static int +test_ether_addr(void) +{ + struct rte_ether_addr rand_ea = { }; + unsigned int i; + + RTE_TEST_ASSERT(rte_is_zero_ether_addr(&zero_ea), "Zero address is not zero"); + RTE_TEST_ASSERT(!rte_is_zero_ether_addr(&bcast_ea), "Broadcast is zero"); + + for (i = 0; i < N; i++) { + rte_eth_random_addr(rand_ea.addr_bytes); + RTE_TEST_ASSERT(!rte_is_zero_ether_addr(&rand_ea), + "Random address is zero"); + RTE_TEST_ASSERT(rte_is_unicast_ether_addr(&rand_ea), + "Random address is not unicast"); + RTE_TEST_ASSERT(rte_is_local_admin_ether_addr(&rand_ea), + "Random address is not local admin"); + } + + return 0; +} + +static int +test_format_addr(void) +{ + struct rte_ether_addr rand_ea = { }; + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + unsigned int i; + + for (i = 0; i < N; i++) { + struct rte_ether_addr result = { }; + int ret; + + rte_eth_random_addr(rand_ea.addr_bytes); + + rte_ether_format_addr(buf, sizeof(buf), &rand_ea); + + ret = rte_ether_unformat_addr(buf, &result); + if (ret != 0) { + fprintf(stderr, "rte_ether_unformat_addr(%s) failed\n", buf); + return -1; + } + RTE_TEST_ASSERT(rte_is_same_ether_addr(&rand_ea, &result), + "rte_ether_format/unformat mismatch"); + } + return 0; + +} + +static int +test_unformat_addr(void) +{ + const struct rte_ether_addr expected = { + .addr_bytes = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc }, + }; + const struct rte_ether_addr nozero_ea = { + .addr_bytes = { 1, 2, 3, 4, 5, 6 }, + }; + struct rte_ether_addr result; + int ret; + + /* Test IETF format */ + memset(&result, 0, sizeof(result)); + ret = rte_ether_unformat_addr("12:34:56:78:9a:bc", &result); + RTE_TEST_ASSERT(ret == 0, "IETF unformat failed"); + RTE_TEST_ASSERT(rte_is_same_ether_addr(&expected, &result), + "IETF unformat mismatch"); + + /* Test IEEE format */ + memset(&result, 0, sizeof(result)); + ret = rte_ether_unformat_addr("12-34-56-78-9A-BC", &result); + RTE_TEST_ASSERT(ret == 0, "IEEE unformat failed"); + RTE_TEST_ASSERT(rte_is_same_ether_addr(&expected, &result), + "IEEE unformat mismatch"); + + /* Test Cisco format */ + memset(&result, 0, sizeof(result)); + ret = rte_ether_unformat_addr("1234.5678.9ABC", &result); + RTE_TEST_ASSERT(ret == 0, "Cisco unformat failed"); + RTE_TEST_ASSERT(rte_is_same_ether_addr(&expected, &result), + "Cisco unformat mismatch"); + + /* Test no leading zeros - IETF */ + 
memset(&result, 0, sizeof(result)); + ret = rte_ether_unformat_addr("1:2:3:4:5:6", &result); + RTE_TEST_ASSERT(ret == 0, "IETF leading zero failed"); + RTE_TEST_ASSERT(rte_is_same_ether_addr(&nozero_ea, &result), + "IETF leading zero mismatch"); + + /* Test no-leading zero - IEEE format */ + memset(&result, 0, sizeof(result)); + ret = rte_ether_unformat_addr("1-2-3-4-5-6", &result); + RTE_TEST_ASSERT(ret == 0, "IEEE leading zero failed"); + RTE_TEST_ASSERT(rte_is_same_ether_addr(&nozero_ea, &result), + "IEEE leading zero mismatch"); + + + return 0; +} + +static int +test_invalid_addr(void) +{ + static const char * const invalid[] = { + "123", + "123:456", + "12:34:56:78:9a:gh", + "12:34:56:78:9a", + "100:34:56:78:9a:bc", + "34-56-78-9a-bc", + "12:34:56-78:9a:bc", + "12:34:56.78:9a:bc", + "123:456:789:abc", + "NOT.AN.ADDRESS", + "102.304.506", + "", + }; + struct rte_ether_addr result; + unsigned int i; + + for (i = 0; i < RTE_DIM(invalid); ++i) { + if (!rte_ether_unformat_addr(invalid[i], &result)) { + fprintf(stderr, "rte_ether_unformat_addr(%s) succeeded!\n", + invalid[i]); + return -1; + } + } + return 0; +} + +static int +test_net_ether(void) +{ + if (test_ether_addr()) + return -1; + + if (test_format_addr()) + return -1; + + if (test_unformat_addr()) + return -1; + + if (test_invalid_addr()) + return -1; + + return 0; +} + +REGISTER_FAST_TEST(net_ether_autotest, true, true, test_net_ether); diff --git a/app/test/test_pdcp.c b/app/test/test_pdcp.c index 6c73c1db369..49bc8641a0e 100644 --- a/app/test/test_pdcp.c +++ b/app/test/test_pdcp.c @@ -15,6 +15,7 @@ #include "test.h" #include "test_cryptodev.h" +#include "test_cryptodev_security_pdcp_sdap_test_vectors.h" #include "test_cryptodev_security_pdcp_test_vectors.h" #define NSECPERSEC 1E9 @@ -23,7 +24,8 @@ #define TEST_EV_QUEUE_ID 0 #define TEST_EV_PORT_ID 0 #define CDEV_INVALID_ID UINT8_MAX -#define NB_TESTS RTE_DIM(pdcp_test_params) +#define NB_BASIC_TESTS RTE_DIM(pdcp_test_params) +#define NB_SDAP_TESTS RTE_DIM(list_pdcp_sdap_tests) #define PDCP_IV_LEN 16 /* Assert that condition is true, or goto the mark */ @@ -72,24 +74,71 @@ struct pdcp_test_conf { uint32_t output_len; }; -static int create_test_conf_from_index(const int index, struct pdcp_test_conf *conf); +enum pdcp_test_suite_type { + PDCP_TEST_SUITE_TY_BASIC, + PDCP_TEST_SUITE_TY_SDAP, +}; + +static int create_test_conf_from_index(const int index, struct pdcp_test_conf *conf, + enum pdcp_test_suite_type suite_type); typedef int (*test_with_conf_t)(struct pdcp_test_conf *conf); +static uint32_t +nb_tests_get(enum pdcp_test_suite_type type) +{ + uint32_t ret; + + switch (type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = NB_BASIC_TESTS; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = NB_SDAP_TESTS; + break; + default: + return 0; + } + + return ret; +} + +static const char* +pdcp_test_name_get(enum pdcp_test_suite_type type, int idx) +{ + const char *test_name = NULL; + + switch (type) { + case PDCP_TEST_SUITE_TY_BASIC: + test_name = pdcp_test_params[idx].name; + break; + case PDCP_TEST_SUITE_TY_SDAP: + test_name = list_pdcp_sdap_tests[idx].param.name; + break; + default: + return NULL; + } + + return test_name; +} + static int -run_test_foreach_known_vec(test_with_conf_t test, bool stop_on_first_pass) +run_test_foreach_known_vec(test_with_conf_t test, bool stop_on_first_pass, + enum pdcp_test_suite_type suite_type) { struct pdcp_test_conf test_conf; bool all_tests_skipped = true; + uint32_t nb_tests = nb_tests_get(suite_type); uint32_t i; int ret; - for (i = 0; i < NB_TESTS; 
i++) { - create_test_conf_from_index(i, &test_conf); + for (i = 0; i < nb_tests; i++) { + create_test_conf_from_index(i, &test_conf, suite_type); ret = test(&test_conf); if (ret == TEST_FAILED) { - printf("[%03i] - %s - failed\n", i, pdcp_test_params[i].name); + printf("[%03i] - %s - failed\n", i, + pdcp_test_name_get(suite_type, i)); return TEST_FAILED; } @@ -113,7 +162,17 @@ run_test_with_all_known_vec(const void *args) { test_with_conf_t test = args; - return run_test_foreach_known_vec(test, false); + return run_test_foreach_known_vec(test, false, + PDCP_TEST_SUITE_TY_BASIC); +} + +static int +run_test_with_all_sdap_known_vec(const void *args) +{ + test_with_conf_t test = args; + + return run_test_foreach_known_vec(test, false, + PDCP_TEST_SUITE_TY_SDAP); } static int @@ -121,7 +180,8 @@ run_test_with_all_known_vec_until_first_pass(const void *args) { test_with_conf_t test = args; - return run_test_foreach_known_vec(test, true); + return run_test_foreach_known_vec(test, true, + PDCP_TEST_SUITE_TY_BASIC); } static inline uint32_t @@ -522,13 +582,296 @@ pdcp_sn_to_raw_set(void *data, uint32_t sn, int size) } } +static uint8_t +pdcp_test_bearer_get(enum pdcp_test_suite_type suite_type, const int index) +{ + uint8_t ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = pdcp_test_bearer[index]; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].bearer; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + ret = -1; + + } + + return ret; +} + +static enum rte_security_pdcp_domain +pdcp_test_param_domain_get(enum pdcp_test_suite_type suite_type, const int index) +{ + enum rte_security_pdcp_domain ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = pdcp_test_params[index].domain; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].param.domain; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + ret = -1; + } + + return ret; +} + +static uint8_t +pdcp_test_data_sn_size_get(enum pdcp_test_suite_type suite_type, const int index) +{ + uint8_t ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = pdcp_test_data_sn_size[index]; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].sn_size; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + return -1; + + } + + return ret; +} + +static uint8_t +pdcp_test_packet_direction_get(enum pdcp_test_suite_type suite_type, const int index) +{ + uint8_t ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = pdcp_test_packet_direction[index]; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].packet_direction; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + return -1; + } + + return ret; +} + +static enum rte_crypto_cipher_algorithm +pdcp_test_param_cipher_alg_get(enum pdcp_test_suite_type suite_type, const int index) +{ + enum rte_crypto_cipher_algorithm ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = pdcp_test_params[index].cipher_alg; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].param.cipher_alg; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + return 0; + } + + return ret; +} + +static uint8_t +pdcp_test_param_cipher_key_len_get(enum pdcp_test_suite_type suite_type, const int index) +{ + uint8_t ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = 
pdcp_test_params[index].cipher_key_len; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].param.cipher_key_len; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + return -1; + } + + return ret; +} + +static const uint8_t* +pdcp_test_crypto_key_get(enum pdcp_test_suite_type suite_type, const int index) +{ + const uint8_t *ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = pdcp_test_crypto_key[index]; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].cipher_key; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + return NULL; + } + + return ret; +} + +static enum rte_crypto_auth_algorithm +pdcp_test_param_auth_alg_get(enum pdcp_test_suite_type suite_type, const int index) +{ + enum rte_crypto_auth_algorithm ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = pdcp_test_params[index].auth_alg; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].param.auth_alg; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + return 0; + } + + return ret; +} + +static uint8_t +pdcp_test_param_auth_key_len_get(enum pdcp_test_suite_type suite_type, const int index) +{ + uint8_t ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = pdcp_test_params[index].auth_key_len; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].param.auth_key_len; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + return -1; + } + + return ret; +} + +static const uint8_t* +pdcp_test_auth_key_get(enum pdcp_test_suite_type suite_type, const int index) +{ + const uint8_t *ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = pdcp_test_auth_key[index]; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].auth_key; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + return NULL; + } + + return ret; +} + +static const uint8_t* +pdcp_test_data_in_get(enum pdcp_test_suite_type suite_type, const int index) +{ + const uint8_t *ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = pdcp_test_data_in[index]; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].data_in; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + return NULL; + } + + return ret; +} + +static uint8_t +pdcp_test_data_in_len_get(enum pdcp_test_suite_type suite_type, const int index) +{ + uint8_t ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = pdcp_test_data_in_len[index]; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].in_len; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + return -1; + } + + return ret; +} + +static const uint8_t* +pdcp_test_data_out_get(enum pdcp_test_suite_type suite_type, const int index) +{ + const uint8_t *ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = pdcp_test_data_out[index]; + break; + case PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].data_out; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + return NULL; + } + + return ret; +} + +static uint32_t +pdcp_test_hfn_get(enum pdcp_test_suite_type suite_type, const int index) +{ + uint32_t ret; + + switch (suite_type) { + case PDCP_TEST_SUITE_TY_BASIC: + ret = pdcp_test_hfn[index]; + break; + case 
PDCP_TEST_SUITE_TY_SDAP: + ret = list_pdcp_sdap_tests[index].hfn; + break; + default: + RTE_LOG(ERR, USER1, "Invalid suite_type: %d\n", suite_type); + return -1; + } + + return ret; +} + static int -create_test_conf_from_index(const int index, struct pdcp_test_conf *conf) +create_test_conf_from_index(const int index, struct pdcp_test_conf *conf, + enum pdcp_test_suite_type suite_type) { const struct pdcp_testsuite_params *ts_params = &testsuite_params; struct rte_crypto_sym_xform c_xfrm, a_xfrm; + const uint8_t *data, *expected; uint32_t sn, expected_len; - uint8_t *data, *expected; int pdcp_hdr_sz; memset(conf, 0, sizeof(*conf)); @@ -538,41 +881,42 @@ create_test_conf_from_index(const int index, struct pdcp_test_conf *conf) conf->entity.sess_mpool = ts_params->sess_pool; conf->entity.cop_pool = ts_params->cop_pool; conf->entity.ctrl_pdu_pool = ts_params->mbuf_pool; - conf->entity.pdcp_xfrm.bearer = pdcp_test_bearer[index]; + conf->entity.pdcp_xfrm.bearer = pdcp_test_bearer_get(suite_type, index); conf->entity.pdcp_xfrm.en_ordering = 0; conf->entity.pdcp_xfrm.remove_duplicates = 0; - conf->entity.pdcp_xfrm.domain = pdcp_test_params[index].domain; + conf->entity.pdcp_xfrm.domain = pdcp_test_param_domain_get(suite_type, index); conf->entity.t_reordering = t_reorder_timer; - if (pdcp_test_packet_direction[index] == PDCP_DIR_UPLINK) + if (pdcp_test_packet_direction_get(suite_type, index) == PDCP_DIR_UPLINK) conf->entity.pdcp_xfrm.pkt_dir = RTE_SECURITY_PDCP_UPLINK; else conf->entity.pdcp_xfrm.pkt_dir = RTE_SECURITY_PDCP_DOWNLINK; - conf->entity.pdcp_xfrm.sn_size = pdcp_test_data_sn_size[index]; + conf->entity.pdcp_xfrm.sn_size = pdcp_test_data_sn_size_get(suite_type, index); /* Zero initialize unsupported flags */ conf->entity.pdcp_xfrm.hfn_threshold = 0; conf->entity.pdcp_xfrm.hfn_ovrd = 0; - conf->entity.pdcp_xfrm.sdap_enabled = 0; + + conf->entity.pdcp_xfrm.sdap_enabled = (suite_type == PDCP_TEST_SUITE_TY_SDAP); c_xfrm.type = RTE_CRYPTO_SYM_XFORM_CIPHER; - c_xfrm.cipher.algo = pdcp_test_params[index].cipher_alg; - c_xfrm.cipher.key.length = pdcp_test_params[index].cipher_key_len; - c_xfrm.cipher.key.data = pdcp_test_crypto_key[index]; + c_xfrm.cipher.algo = pdcp_test_param_cipher_alg_get(suite_type, index); + c_xfrm.cipher.key.length = pdcp_test_param_cipher_key_len_get(suite_type, index); + c_xfrm.cipher.key.data = pdcp_test_crypto_key_get(suite_type, index); a_xfrm.type = RTE_CRYPTO_SYM_XFORM_AUTH; - if (pdcp_test_params[index].auth_alg == 0) { + if (pdcp_test_param_auth_alg_get(suite_type, index) == 0) { conf->is_integrity_protected = false; } else { - a_xfrm.auth.algo = pdcp_test_params[index].auth_alg; - a_xfrm.auth.key.data = pdcp_test_auth_key[index]; - a_xfrm.auth.key.length = pdcp_test_params[index].auth_key_len; + a_xfrm.auth.algo = pdcp_test_param_auth_alg_get(suite_type, index); + a_xfrm.auth.key.data = pdcp_test_auth_key_get(suite_type, index); + a_xfrm.auth.key.length = pdcp_test_param_auth_key_len_get(suite_type, index); conf->is_integrity_protected = true; } - pdcp_hdr_sz = pdcp_hdr_size_get(pdcp_test_data_sn_size[index]); + pdcp_hdr_sz = pdcp_hdr_size_get(pdcp_test_data_sn_size_get(suite_type, index)); /* * Uplink means PDCP entity is configured for transmit. 
Downlink means PDCP entity is @@ -640,41 +984,42 @@ create_test_conf_from_index(const int index, struct pdcp_test_conf *conf) conf->entity.dev_id = (uint8_t)cryptodev_id_get(conf->is_integrity_protected, &conf->c_xfrm, &conf->a_xfrm); - if (pdcp_test_params[index].domain == RTE_SECURITY_PDCP_MODE_CONTROL || - pdcp_test_params[index].domain == RTE_SECURITY_PDCP_MODE_DATA) { - data = pdcp_test_data_in[index]; - sn = pdcp_sn_from_raw_get(data, pdcp_test_data_sn_size[index]); - conf->entity.pdcp_xfrm.hfn = pdcp_test_hfn[index]; + if (pdcp_test_param_domain_get(suite_type, index) == RTE_SECURITY_PDCP_MODE_CONTROL || + pdcp_test_param_domain_get(suite_type, index) == RTE_SECURITY_PDCP_MODE_DATA) { + data = pdcp_test_data_in_get(suite_type, index); + sn = pdcp_sn_from_raw_get(data, pdcp_test_data_sn_size_get(suite_type, index)); + conf->entity.pdcp_xfrm.hfn = pdcp_test_hfn_get(suite_type, index); conf->entity.sn = sn; } if (conf->entity.pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK) { #ifdef VEC_DUMP - debug_hexdump(stdout, "Original vector:", pdcp_test_data_in[index], - pdcp_test_data_in_len[index]); + debug_hexdump(stdout, "Original vector:", pdcp_test_data_in_get(suite_type, index), + pdcp_test_data_in_len_get(suite_type, index)); #endif /* Since the vectors available already have PDCP header, trim the same */ - conf->input_len = pdcp_test_data_in_len[index] - pdcp_hdr_sz; - memcpy(conf->input, pdcp_test_data_in[index] + pdcp_hdr_sz, conf->input_len); + conf->input_len = pdcp_test_data_in_len_get(suite_type, index) - pdcp_hdr_sz; + memcpy(conf->input, pdcp_test_data_in_get(suite_type, index) + pdcp_hdr_sz, + conf->input_len); } else { - conf->input_len = pdcp_test_data_in_len[index]; + conf->input_len = pdcp_test_data_in_len_get(suite_type, index); if (conf->is_integrity_protected) conf->input_len += RTE_PDCP_MAC_I_LEN; - memcpy(conf->input, pdcp_test_data_out[index], conf->input_len); + memcpy(conf->input, pdcp_test_data_out_get(suite_type, index), conf->input_len); #ifdef VEC_DUMP debug_hexdump(stdout, "Original vector:", conf->input, conf->input_len); #endif } if (conf->entity.pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK) - expected = pdcp_test_data_out[index]; + expected = pdcp_test_data_out_get(suite_type, index); else - expected = pdcp_test_data_in[index]; + expected = pdcp_test_data_in_get(suite_type, index); /* Calculate expected packet length */ - expected_len = pdcp_test_data_in_len[index]; + expected_len = pdcp_test_data_in_len_get(suite_type, index); /* In DL processing, PDCP header would be stripped */ if (conf->entity.pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) { @@ -1836,7 +2181,7 @@ run_test_for_one_known_vec(const void *arg) struct pdcp_test_conf test_conf; int i = *(const uint32_t *)arg; - create_test_conf_from_index(i, &test_conf); + create_test_conf_from_index(i, &test_conf, PDCP_TEST_SUITE_TY_BASIC); return test_attempt_single(&test_conf); } @@ -1924,8 +2269,21 @@ static struct unit_test_suite status_report_test_cases = { } }; +static struct unit_test_suite sdap_test_cases = { + .suite_name = "PDCP SDAP", + .unit_test_cases = { + TEST_CASE_NAMED_WITH_DATA("SDAP Known vector cases", + ut_setup_pdcp, ut_teardown_pdcp, + run_test_with_all_sdap_known_vec, test_attempt_single), + TEST_CASE_NAMED_WITH_DATA("SDAP combined mode", + ut_setup_pdcp, ut_teardown_pdcp, + run_test_with_all_sdap_known_vec, test_combined), + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; struct unit_test_suite *test_suites[] = { NULL, /* Place holder for known_vector_cases */ + 
&sdap_test_cases, &combined_mode_cases, &hfn_sn_test_cases, &reorder_test_cases, @@ -1945,11 +2303,12 @@ static int test_pdcp(void) { struct unit_test_suite *known_vector_cases; - int ret, index[NB_TESTS]; + uint32_t nb_tests = nb_tests_get(PDCP_TEST_SUITE_TY_BASIC); + int ret, index[nb_tests]; uint32_t i, size; size = sizeof(struct unit_test_suite); - size += (NB_TESTS + 1) * sizeof(struct unit_test_case); + size += (nb_tests + 1) * sizeof(struct unit_test_case); known_vector_cases = rte_zmalloc(NULL, size, 0); if (known_vector_cases == NULL) @@ -1957,7 +2316,7 @@ test_pdcp(void) known_vector_cases->suite_name = "Known vector cases"; - for (i = 0; i < NB_TESTS; i++) { + for (i = 0; i < nb_tests; i++) { index[i] = i; known_vector_cases->unit_test_cases[i].name = pdcp_test_params[i].name; known_vector_cases->unit_test_cases[i].data = (void *)&index[i]; @@ -1980,4 +2339,4 @@ test_pdcp(void) return ret; } -REGISTER_TEST_COMMAND(pdcp_autotest, test_pdcp); +REGISTER_FAST_TEST(pdcp_autotest, false, true, test_pdcp); diff --git a/app/test/test_pdump.c b/app/test/test_pdump.c index ea03056b475..9f7769707e9 100644 --- a/app/test/test_pdump.c +++ b/app/test/test_pdump.c @@ -136,8 +136,8 @@ test_pdump_uninit(void) return ret; } -void * -send_pkts(void *empty) +uint32_t +send_pkts(void *empty __rte_unused) { int ret = 0; struct rte_mbuf *pbuf[NUM_PACKETS] = { }; @@ -161,7 +161,7 @@ send_pkts(void *empty) rte_eth_dev_stop(portid); test_put_mbuf_to_pool(mp, pbuf); - return empty; + return 0; } /* @@ -219,4 +219,4 @@ test_pdump(void) return TEST_SUCCESS; } -REGISTER_TEST_COMMAND(pdump_autotest, test_pdump); +REGISTER_FAST_TEST(pdump_autotest, true, false, test_pdump); diff --git a/app/test/test_pdump.h b/app/test/test_pdump.h index abef9a85ec0..8746d61269a 100644 --- a/app/test/test_pdump.h +++ b/app/test/test_pdump.h @@ -9,7 +9,7 @@ #define NUM_ITR 3 /* sample test to send packets to the pdump client recursively */ -void *send_pkts(void *port); +uint32_t send_pkts(void *empty); /* Sample test to create setup for the pdump server tests */ int test_pdump_init(void); diff --git a/app/test/test_per_lcore.c b/app/test/test_per_lcore.c index 129578d1a30..63c5c80c24e 100644 --- a/app/test/test_per_lcore.c +++ b/app/test/test_per_lcore.c @@ -105,4 +105,4 @@ test_per_lcore(void) return 0; } -REGISTER_TEST_COMMAND(per_lcore_autotest, test_per_lcore); +REGISTER_FAST_TEST(per_lcore_autotest, true, true, test_per_lcore); diff --git a/app/test/test_pflock.c b/app/test/test_pflock.c index 38da6bce27c..5f77b158c80 100644 --- a/app/test/test_pflock.c +++ b/app/test/test_pflock.c @@ -193,4 +193,4 @@ test_pflock(void) return 0; } -REGISTER_TEST_COMMAND(pflock_autotest, test_pflock); +REGISTER_FAST_TEST(pflock_autotest, true, true, test_pflock); diff --git a/app/test/test_pie.c b/app/test/test_pie.c index a3c0f97c9df..8036bac1e6d 100644 --- a/app/test/test_pie.c +++ b/app/test/test_pie.c @@ -1087,6 +1087,6 @@ test_pie_all(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(pie_autotest, test_pie); -REGISTER_TEST_COMMAND(pie_perf, test_pie_perf); -REGISTER_TEST_COMMAND(pie_all, test_pie_all); +REGISTER_FAST_TEST(pie_autotest, true, true, test_pie); +REGISTER_PERF_TEST(pie_perf, test_pie_perf); +REGISTER_PERF_TEST(pie_all, test_pie_all); diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c index 3ef590cb517..f6d97f21c93 100644 --- a/app/test/test_pmd_perf.c +++ b/app/test/test_pmd_perf.c @@ -899,4 +899,4 @@ test_set_rxtx_sc(cmdline_fixed_string_t type) return -1; } 
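
Note on the PDCP hunks above: they replace direct indexing of the global vector arrays (pdcp_test_bearer[], pdcp_test_hfn[], pdcp_test_params[], ...) with per-suite accessors such as pdcp_test_hfn_get(suite_type, index), so the same create_test_conf_from_index() body can serve both the original known-vector suite and the new SDAP suite, and sdap_enabled is derived from the suite type instead of being hard-coded to 0. Below is a minimal, self-contained sketch of that dispatch shape; the array contents, the SDAP struct layout and the enum members are illustrative stand-ins, only the switch-on-suite-type pattern mirrors the patch.

/*
 * Sketch of the per-suite accessor pattern; not the full test code.
 * pdcp_test_hfn[] and list_pdcp_sdap_tests[] are named as in the patch,
 * but their contents and layout here are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

enum pdcp_test_suite_type {
	PDCP_TEST_SUITE_TY_BASIC,
	PDCP_TEST_SUITE_TY_SDAP,
};

static const uint32_t pdcp_test_hfn[] = { 0x1, 0x2, 0x3 };

static const struct {
	uint32_t hfn;
} list_pdcp_sdap_tests[] = { { 0x10 }, { 0x20 } };

static int
pdcp_test_hfn_get(enum pdcp_test_suite_type suite_type, int index)
{
	int ret;

	switch (suite_type) {
	case PDCP_TEST_SUITE_TY_BASIC:
		ret = pdcp_test_hfn[index];
		break;
	case PDCP_TEST_SUITE_TY_SDAP:
		ret = list_pdcp_sdap_tests[index].hfn;
		break;
	default:
		fprintf(stderr, "Invalid suite_type: %d\n", suite_type);
		return -1;
	}

	return ret;
}

int
main(void)
{
	/* Both suites share the same index-based lookup. */
	printf("basic hfn: %d\n", pdcp_test_hfn_get(PDCP_TEST_SUITE_TY_BASIC, 1));
	printf("sdap hfn:  %d\n", pdcp_test_hfn_get(PDCP_TEST_SUITE_TY_SDAP, 0));
	return 0;
}

With this in place, test_pdcp() only needs nb_tests_get(PDCP_TEST_SUITE_TY_BASIC) to size the known-vector suite at run time instead of relying on the fixed NB_TESTS constant.
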
-REGISTER_TEST_COMMAND(pmd_perf_autotest, test_pmd_perf); +REGISTER_PERF_TEST(pmd_perf_autotest, test_pmd_perf); diff --git a/app/test/test_pmd_ring.c b/app/test/test_pmd_ring.c index 86b1db2c1fc..e83b9dd6b82 100644 --- a/app/test/test_pmd_ring.c +++ b/app/test/test_pmd_ring.c @@ -593,4 +593,4 @@ test_pmd_ring(void) return unit_test_suite_runner(&test_pmd_ring_suite); } -REGISTER_TEST_COMMAND(ring_pmd_autotest, test_pmd_ring); +REGISTER_FAST_TEST(ring_pmd_autotest, true, true, test_pmd_ring); diff --git a/app/test/test_pmd_ring_perf.c b/app/test/test_pmd_ring_perf.c index d249b7de5f1..3636df5c733 100644 --- a/app/test/test_pmd_ring_perf.c +++ b/app/test/test_pmd_ring_perf.c @@ -163,4 +163,4 @@ test_ring_pmd_perf(void) return 0; } -REGISTER_TEST_COMMAND(ring_pmd_perf_autotest, test_ring_pmd_perf); +REGISTER_PERF_TEST(ring_pmd_perf_autotest, test_ring_pmd_perf); diff --git a/app/test/test_power.c b/app/test/test_power.c index b7b5561348a..f1e80299d3c 100644 --- a/app/test/test_power.c +++ b/app/test/test_power.c @@ -134,6 +134,7 @@ test_power(void) const enum power_management_env envs[] = {PM_ENV_ACPI_CPUFREQ, PM_ENV_KVM_VM, PM_ENV_PSTATE_CPUFREQ, + PM_ENV_AMD_PSTATE_CPUFREQ, PM_ENV_CPPC_CPUFREQ}; unsigned int i; @@ -172,4 +173,4 @@ test_power(void) } #endif -REGISTER_TEST_COMMAND(power_autotest, test_power); +REGISTER_FAST_TEST(power_autotest, true, true, test_power); diff --git a/app/test/test_power_cpufreq.c b/app/test/test_power_cpufreq.c index 4d013cd7bb4..619b2811c6a 100644 --- a/app/test/test_power_cpufreq.c +++ b/app/test/test_power_cpufreq.c @@ -93,6 +93,17 @@ check_cur_freq(unsigned int lcore_id, uint32_t idx, bool turbo) freq_conv = (cur_freq + TEST_FREQ_ROUNDING_DELTA) / TEST_ROUND_FREQ_TO_N_100000; freq_conv = freq_conv * TEST_ROUND_FREQ_TO_N_100000; + } else if (env == PM_ENV_AMD_PSTATE_CPUFREQ) { + freq_conv = cur_freq > freqs[idx] ? (cur_freq - freqs[idx]) : + (freqs[idx] - cur_freq); + if (freq_conv <= TEST_FREQ_ROUNDING_DELTA) { + /* workaround: current frequency may deviate from + * nominal freq. Allow deviation of up to 50Mhz. + */ + printf("Current frequency deviated from nominal " + "frequency by %d Khz!\n", freq_conv); + freq_conv = freqs[idx]; + } } if (turbo) @@ -502,7 +513,8 @@ test_power_cpufreq(void) /* Test environment configuration */ env = rte_power_get_env(); if ((env != PM_ENV_ACPI_CPUFREQ) && (env != PM_ENV_PSTATE_CPUFREQ) && - (env != PM_ENV_CPPC_CPUFREQ)) { + (env != PM_ENV_CPPC_CPUFREQ) && + (env != PM_ENV_AMD_PSTATE_CPUFREQ)) { printf("Unexpectedly got an environment other than ACPI/PSTATE\n"); goto fail_all; } @@ -703,5 +715,5 @@ test_power_caps(void) #endif -REGISTER_TEST_COMMAND(power_cpufreq_autotest, test_power_cpufreq); +REGISTER_FAST_TEST(power_cpufreq_autotest, false, true, test_power_cpufreq); REGISTER_TEST_COMMAND(power_caps_autotest, test_power_caps); diff --git a/app/test/test_power_intel_uncore.c b/app/test/test_power_intel_uncore.c index 31163af84e8..80b45ce46e5 100644 --- a/app/test/test_power_intel_uncore.c +++ b/app/test/test_power_intel_uncore.c @@ -14,7 +14,7 @@ test_power_intel_uncore(void) } #else -#include +#include #include #define MAX_UNCORE_FREQS 32 @@ -246,10 +246,14 @@ test_power_intel_uncore(void) { int ret; + ret = rte_power_set_uncore_env(RTE_UNCORE_PM_ENV_INTEL_UNCORE); + if (ret < 0) + goto fail_all; + ret = rte_power_uncore_get_num_pkgs(); if (ret == 0) { printf("Uncore frequency management not supported/enabled on this kernel. 
" - "Please enable CONFIG_INTEL_UNCORE_FREQ_CONTROL if on x86 with linux kernel" + "Please enable CONFIG_INTEL_UNCORE_FREQ_CONTROL if on Intel x86 with linux kernel" " >= 5.6\n"); return TEST_SKIPPED; } @@ -298,4 +302,4 @@ test_power_intel_uncore(void) } #endif -REGISTER_TEST_COMMAND(power_intel_uncore_autotest, test_power_intel_uncore); +REGISTER_FAST_TEST(power_intel_uncore_autotest, true, true, test_power_intel_uncore); diff --git a/app/test/test_power_kvm_vm.c b/app/test/test_power_kvm_vm.c index cc66b7a8a03..464e06002e4 100644 --- a/app/test/test_power_kvm_vm.c +++ b/app/test/test_power_kvm_vm.c @@ -299,4 +299,4 @@ test_power_kvm_vm(void) } #endif -REGISTER_TEST_COMMAND(power_kvm_vm_autotest, test_power_kvm_vm); +REGISTER_FAST_TEST(power_kvm_vm_autotest, false, true, test_power_kvm_vm); diff --git a/app/test/test_prefetch.c b/app/test/test_prefetch.c index 7b4a8e41449..46e6828e6a8 100644 --- a/app/test/test_prefetch.c +++ b/app/test/test_prefetch.c @@ -35,4 +35,4 @@ test_prefetch(void) return 0; } -REGISTER_TEST_COMMAND(prefetch_autotest, test_prefetch); +REGISTER_FAST_TEST(prefetch_autotest, true, true, test_prefetch); diff --git a/app/test/test_rand_perf.c b/app/test/test_rand_perf.c index 26fb1d9a586..30204e12c01 100644 --- a/app/test/test_rand_perf.c +++ b/app/test/test_rand_perf.c @@ -96,4 +96,4 @@ test_rand_perf(void) return 0; } -REGISTER_TEST_COMMAND(rand_perf_autotest, test_rand_perf); +REGISTER_PERF_TEST(rand_perf_autotest, test_rand_perf); diff --git a/app/test/test_rawdev.c b/app/test/test_rawdev.c index 3c780e3f9e9..d34691dacf5 100644 --- a/app/test/test_rawdev.c +++ b/app/test/test_rawdev.c @@ -67,4 +67,4 @@ test_rawdev_selftests(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(rawdev_autotest, test_rawdev_selftests); +REGISTER_FAST_TEST(rawdev_autotest, true, true, test_rawdev_selftests); diff --git a/app/test/test_rcu_qsbr.c b/app/test/test_rcu_qsbr.c index 70404e89e62..72d8e0377e8 100644 --- a/app/test/test_rcu_qsbr.c +++ b/app/test/test_rcu_qsbr.c @@ -1418,4 +1418,4 @@ test_rcu_qsbr_main(void) return -1; } -REGISTER_TEST_COMMAND(rcu_qsbr_autotest, test_rcu_qsbr_main); +REGISTER_FAST_TEST(rcu_qsbr_autotest, true, true, test_rcu_qsbr_main); diff --git a/app/test/test_rcu_qsbr_perf.c b/app/test/test_rcu_qsbr_perf.c index b15e5cef885..ce88a7333cd 100644 --- a/app/test/test_rcu_qsbr_perf.c +++ b/app/test/test_rcu_qsbr_perf.c @@ -690,4 +690,4 @@ test_rcu_qsbr_main(void) return -1; } -REGISTER_TEST_COMMAND(rcu_qsbr_perf_autotest, test_rcu_qsbr_main); +REGISTER_PERF_TEST(rcu_qsbr_perf_autotest, test_rcu_qsbr_main); diff --git a/app/test/test_reassembly_perf.c b/app/test/test_reassembly_perf.c index c11b65291fe..4b4929d7779 100644 --- a/app/test/test_reassembly_perf.c +++ b/app/test/test_reassembly_perf.c @@ -1000,4 +1000,4 @@ test_reassembly_perf(void) return TEST_SUCCESS; } -REGISTER_TEST_COMMAND(reassembly_perf_autotest, test_reassembly_perf); +REGISTER_PERF_TEST(reassembly_perf_autotest, test_reassembly_perf); diff --git a/app/test/test_reciprocal_division.c b/app/test/test_reciprocal_division.c index 8ea9b1d24d2..fb52b2d5a1c 100644 --- a/app/test/test_reciprocal_division.c +++ b/app/test/test_reciprocal_division.c @@ -164,4 +164,4 @@ test_reciprocal(void) return result; } -REGISTER_TEST_COMMAND(reciprocal_division, test_reciprocal); +REGISTER_PERF_TEST(reciprocal_division, test_reciprocal); diff --git a/app/test/test_reciprocal_division_perf.c b/app/test/test_reciprocal_division_perf.c index 4f625873e53..cf96d46a225 100644 --- 
a/app/test/test_reciprocal_division_perf.c +++ b/app/test/test_reciprocal_division_perf.c @@ -205,4 +205,4 @@ test_reciprocal_division_perf(void) return result; } -REGISTER_TEST_COMMAND(reciprocal_division_perf, test_reciprocal_division_perf); +REGISTER_PERF_TEST(reciprocal_division_perf, test_reciprocal_division_perf); diff --git a/app/test/test_red.c b/app/test/test_red.c index 84c292f8d87..aa7538d51a3 100644 --- a/app/test/test_red.c +++ b/app/test/test_red.c @@ -1878,5 +1878,5 @@ test_red_all(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ REGISTER_TEST_COMMAND(red_autotest, test_red); -REGISTER_TEST_COMMAND(red_perf, test_red_perf); -REGISTER_TEST_COMMAND(red_all, test_red_all); +REGISTER_PERF_TEST(red_perf, test_red_perf); +REGISTER_PERF_TEST(red_all, test_red_all); diff --git a/app/test/test_reorder.c b/app/test/test_reorder.c index c188f6ce679..501780cb260 100644 --- a/app/test/test_reorder.c +++ b/app/test/test_reorder.c @@ -548,4 +548,4 @@ test_reorder(void) } -REGISTER_TEST_COMMAND(reorder_autotest, test_reorder); +REGISTER_FAST_TEST(reorder_autotest, true, true, test_reorder); diff --git a/app/test/test_rib.c b/app/test/test_rib.c index 65b68564109..c7454f2c475 100644 --- a/app/test/test_rib.c +++ b/app/test/test_rib.c @@ -363,5 +363,5 @@ test_slow_rib(void) return unit_test_suite_runner(&rib_slow_tests); } -REGISTER_TEST_COMMAND(rib_autotest, test_rib); -REGISTER_TEST_COMMAND(rib_slow_autotest, test_slow_rib); +REGISTER_FAST_TEST(rib_autotest, true, true, test_rib); +REGISTER_PERF_TEST(rib_slow_autotest, test_slow_rib); diff --git a/app/test/test_rib6.c b/app/test/test_rib6.c index 336b779d2e1..33596fddb4e 100644 --- a/app/test/test_rib6.c +++ b/app/test/test_rib6.c @@ -367,5 +367,5 @@ test_slow_rib6(void) return unit_test_suite_runner(&rib6_slow_tests); } -REGISTER_TEST_COMMAND(rib6_autotest, test_rib6); -REGISTER_TEST_COMMAND(rib6_slow_autotest, test_slow_rib6); +REGISTER_FAST_TEST(rib6_autotest, true, true, test_rib6); +REGISTER_PERF_TEST(rib6_slow_autotest, test_slow_rib6); diff --git a/app/test/test_ring.c b/app/test/test_ring.c index bde33ab4a19..ba1fec1de3b 100644 --- a/app/test/test_ring.c +++ b/app/test/test_ring.c @@ -1241,4 +1241,4 @@ test_ring(void) return -1; } -REGISTER_TEST_COMMAND(ring_autotest, test_ring); +REGISTER_FAST_TEST(ring_autotest, true, true, test_ring); diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c index 3972fd9db3e..d7c5a4c30bf 100644 --- a/app/test/test_ring_perf.c +++ b/app/test/test_ring_perf.c @@ -579,4 +579,4 @@ test_ring_perf(void) return 0; } -REGISTER_TEST_COMMAND(ring_perf_autotest, test_ring_perf); +REGISTER_PERF_TEST(ring_perf_autotest, test_ring_perf); diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c index 4ae0bf8deb3..50798958d77 100644 --- a/app/test/test_rwlock.c +++ b/app/test/test_rwlock.c @@ -506,7 +506,7 @@ try_rwlock_test_rde_wro(void) return process_try_lcore_stats(); } -REGISTER_TEST_COMMAND(rwlock_test1_autotest, rwlock_test1); -REGISTER_TEST_COMMAND(rwlock_rda_autotest, try_rwlock_test_rda); -REGISTER_TEST_COMMAND(rwlock_rds_wrm_autotest, try_rwlock_test_rds_wrm); -REGISTER_TEST_COMMAND(rwlock_rde_wro_autotest, try_rwlock_test_rde_wro); +REGISTER_FAST_TEST(rwlock_test1_autotest, true, true, rwlock_test1); +REGISTER_FAST_TEST(rwlock_rda_autotest, true, true, try_rwlock_test_rda); +REGISTER_FAST_TEST(rwlock_rds_wrm_autotest, true, true, try_rwlock_test_rds_wrm); +REGISTER_FAST_TEST(rwlock_rde_wro_autotest, true, true, try_rwlock_test_rde_wro); diff --git a/app/test/test_sched.c b/app/test/test_sched.c 
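
Note on the recurring REGISTER_TEST_COMMAND() to REGISTER_FAST_TEST(cmd, no_huge, ASan, func) / REGISTER_PERF_TEST(cmd, func) conversions above and below: each autotest now declares which suite it belongs to at its registration site. Reading the call sites, the two extra booleans appear to flag whether a case can run without hugepages and under AddressSanitizer, but that interpretation is an inference, not something stated in this patch. The sketch below is not DPDK's actual test.h; it only illustrates how registration macros of this shape can self-register a test together with suite metadata.

/*
 * Illustrative only: one possible implementation of such macros, using a
 * constructor-built linked list. Flag meanings are the assumption above.
 */
#include <stdbool.h>
#include <stdio.h>

struct test_command {
	const char *name;
	int (*callback)(void);
	bool fast;     /* fast (CI) suite vs. perf suite */
	bool no_huge;  /* assumed: can run with --no-huge */
	bool asan;     /* assumed: can run under AddressSanitizer */
	struct test_command *next;
};

static struct test_command *test_registry;

static void
register_test(struct test_command *t)
{
	t->next = test_registry;
	test_registry = t;
}

#define REGISTER_TEST(cmd, func, is_fast, no_huge_f, asan_f)		\
static struct test_command cmd##_rec;					\
__attribute__((constructor)) static void cmd##_register(void)		\
{									\
	register_test(&cmd##_rec);					\
}									\
static struct test_command cmd##_rec = {				\
	.name = #cmd, .callback = func, .fast = is_fast,		\
	.no_huge = no_huge_f, .asan = asan_f				\
}

#define REGISTER_FAST_TEST(cmd, no_huge_f, asan_f, func) \
	REGISTER_TEST(cmd, func, true, no_huge_f, asan_f)

#define REGISTER_PERF_TEST(cmd, func) \
	REGISTER_TEST(cmd, func, false, false, false)

static int dummy_fast(void) { return 0; }
static int dummy_perf(void) { return 0; }

REGISTER_FAST_TEST(dummy_autotest, true, true, dummy_fast);
REGISTER_PERF_TEST(dummy_perf_autotest, dummy_perf);

int
main(void)
{
	const struct test_command *t;

	for (t = test_registry; t != NULL; t = t->next)
		printf("%s -> %s suite\n", t->name, t->fast ? "fast" : "perf");
	return 0;
}

In the real tree the fast/perf classification is presumably consumed by the build and test harness to decide which suite a case lands in; the sketch only mirrors the registration side.
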
index ddec5724473..6daee90bca4 100644 --- a/app/test/test_sched.c +++ b/app/test/test_sched.c @@ -215,4 +215,4 @@ test_sched(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(sched_autotest, test_sched); +REGISTER_FAST_TEST(sched_autotest, true, true, test_sched); diff --git a/app/test/test_security.c b/app/test/test_security.c index 4783cd06630..23fc7ffb4f4 100644 --- a/app/test/test_security.c +++ b/app/test/test_security.c @@ -2314,4 +2314,4 @@ test_security(void) return unit_test_suite_runner(&security_testsuite); } -REGISTER_TEST_COMMAND(security_autotest, test_security); +REGISTER_FAST_TEST(security_autotest, false, true, test_security); diff --git a/app/test/test_security_inline_macsec.c b/app/test/test_security_inline_macsec.c index bfb9e09752f..59b1b8a6a6c 100644 --- a/app/test/test_security_inline_macsec.c +++ b/app/test/test_security_inline_macsec.c @@ -136,7 +136,7 @@ init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len) static int init_mempools(unsigned int nb_mbuf) { - struct rte_security_ctx *sec_ctx; + void *sec_ctx; uint16_t nb_sess = 512; uint32_t sess_sz; char s[64]; @@ -482,7 +482,7 @@ test_macsec_post_process(struct rte_mbuf *m, const struct mcs_test_vector *td, } static void -mcs_stats_dump(struct rte_security_ctx *ctx, enum mcs_op op, +mcs_stats_dump(void *ctx, enum mcs_op op, void *rx_sess, void *tx_sess, uint8_t rx_sc_id, uint8_t tx_sc_id, uint16_t rx_sa_id[], uint16_t tx_sa_id[]) @@ -667,7 +667,7 @@ mcs_stats_dump(struct rte_security_ctx *ctx, enum mcs_op op, } static int -mcs_stats_check(struct rte_security_ctx *ctx, enum mcs_op op, +mcs_stats_check(void *ctx, enum mcs_op op, const struct mcs_test_opts *opts, const struct mcs_test_vector *td, void *rx_sess, void *tx_sess, @@ -837,10 +837,60 @@ test_macsec_event_callback(uint16_t port_id, enum rte_eth_event_type type, return 0; } +static int +test_macsec_sec_caps_verify(const struct mcs_test_opts *opts, + const struct rte_security_capability *sec_cap, bool silent) +{ + if (opts->mtu > sec_cap->macsec.mtu) { + if (!silent) + RTE_LOG(INFO, USER1, "MTU size is not supported\n"); + return -ENOTSUP; + } + + if (opts->replay_protect == 1 && sec_cap->macsec.anti_replay == 0) { + if (!silent) + RTE_LOG(INFO, USER1, "Anti replay is not supported\n"); + return -ENOTSUP; + } + + if (opts->replay_win_sz > sec_cap->macsec.replay_win_sz) { + if (!silent) + RTE_LOG(INFO, USER1, "Replay window size is not " + "supported\n"); + return -ENOTSUP; + } + + if (opts->rekey_en == 1 && sec_cap->macsec.re_key == 0) { + if (!silent) + RTE_LOG(INFO, USER1, "Rekey is not supported\n"); + return -ENOTSUP; + } + + if (opts->sectag_insert_mode == 0 && + sec_cap->macsec.relative_sectag_insert == 0) { + if (!silent) + RTE_LOG(INFO, USER1, "Relative offset sectag insert " + "not supported\n"); + return -ENOTSUP; + } + + if (opts->sectag_insert_mode == 1 && + sec_cap->macsec.fixed_sectag_insert == 0) { + if (!silent) + RTE_LOG(INFO, USER1, "Fixed offset sectag insert " + "not supported\n"); + return -ENOTSUP; + } + + return 0; +} + static int test_macsec(const struct mcs_test_vector *td[], enum mcs_op op, const struct mcs_test_opts *opts) { uint16_t rx_sa_id[MCS_MAX_FLOWS][RTE_SECURITY_MACSEC_NUM_AN] = {{0}}; + struct rte_security_capability_idx sec_cap_idx; + const struct rte_security_capability *sec_cap; uint16_t tx_sa_id[MCS_MAX_FLOWS][2] = {{0}}; uint16_t rx_sc_id[MCS_MAX_FLOWS] = {0}; uint16_t tx_sc_id[MCS_MAX_FLOWS] = {0}; @@ -850,7 +900,7 @@ test_macsec(const struct mcs_test_vector *td[], enum mcs_op op, 
const struct mcs struct rte_security_macsec_sa sa_conf = {0}; struct rte_security_macsec_sc sc_conf = {0}; struct mcs_err_vector err_vector = {0}; - struct rte_security_ctx *ctx; + void *ctx; int nb_rx = 0, nb_sent; int i, j = 0, ret, id, an = 0; uint8_t tci_off; @@ -858,12 +908,36 @@ test_macsec(const struct mcs_test_vector *td[], enum mcs_op op, const struct mcs memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * opts->nb_td); - ctx = (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(port_id); + ctx = rte_eth_dev_get_sec_ctx(port_id); if (ctx == NULL) { printf("Ethernet device doesn't support security features.\n"); return TEST_SKIPPED; } + sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL; + sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_MACSEC; + sec_cap_idx.macsec.alg = td[0]->alg; + sec_cap = rte_security_capability_get(ctx, &sec_cap_idx); + if (sec_cap == NULL) { + printf("No capabilities registered\n"); + return TEST_SKIPPED; + } + + if (test_macsec_sec_caps_verify(opts, sec_cap, false) != 0) + return TEST_SKIPPED; + + if (opts->rekey_en) { + /* Verify the rekey td */ + sec_cap_idx.macsec.alg = opts->rekey_td->alg; + sec_cap = rte_security_capability_get(ctx, &sec_cap_idx); + if (sec_cap == NULL) { + printf("No capabilities registered\n"); + return TEST_SKIPPED; + } + if (test_macsec_sec_caps_verify(opts, sec_cap, false) != 0) + return TEST_SKIPPED; + } + tci_off = (opts->sectag_insert_mode == 1) ? RTE_ETHER_HDR_LEN : RTE_ETHER_HDR_LEN + (opts->nb_vlan * RTE_VLAN_HLEN); @@ -1181,11 +1255,12 @@ test_macsec(const struct mcs_test_vector *td[], enum mcs_op op, const struct mcs } static int -test_inline_macsec_encap_all(const void *data __rte_unused) +test_inline_macsec_encap_all(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1200,7 +1275,11 @@ test_inline_macsec_encap_all(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_cipher_vectors[i]; err = test_macsec(&cur_td, MCS_ENCAP, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("Cipher Auth Encryption case %d skipped\n", cur_td->test_idx); + skipped += 1; + err = 0; + } else if (err) { printf("\nCipher Auth Encryption case %d failed", cur_td->test_idx); err = -1; } else { @@ -1209,17 +1288,19 @@ test_inline_macsec_encap_all(const void *data __rte_unused) } all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); - return all_err; + return skipped > 0 ? 
TEST_SKIPPED : all_err; } static int -test_inline_macsec_decap_all(const void *data __rte_unused) +test_inline_macsec_decap_all(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1232,7 +1313,11 @@ test_inline_macsec_decap_all(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_cipher_vectors[i]; err = test_macsec(&cur_td, MCS_DECAP, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("Cipher Auth Decryption case %d skipped\n", cur_td->test_idx); + skipped += 1; + err = 0; + } else if (err) { printf("\nCipher Auth Decryption case %d failed", cur_td->test_idx); err = -1; } else { @@ -1241,17 +1326,19 @@ test_inline_macsec_decap_all(const void *data __rte_unused) } all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); - return all_err; + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_auth_only_all(const void *data __rte_unused) +test_inline_macsec_auth_only_all(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1266,7 +1353,11 @@ test_inline_macsec_auth_only_all(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_integrity_vectors[i]; err = test_macsec(&cur_td, MCS_AUTH_ONLY, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("Auth Generate case %d skipped\n", cur_td->test_idx); + skipped += 1; + err = 0; + } else if (err) { printf("\nAuth Generate case %d failed", cur_td->test_idx); err = -1; } else { @@ -1275,17 +1366,19 @@ test_inline_macsec_auth_only_all(const void *data __rte_unused) } all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); - return all_err; + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_verify_only_all(const void *data __rte_unused) +test_inline_macsec_verify_only_all(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1299,7 +1392,11 @@ test_inline_macsec_verify_only_all(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_integrity_vectors[i]; err = test_macsec(&cur_td, MCS_VERIFY_ONLY, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("Auth Verify case %d skipped\n", cur_td->test_idx); + skipped += 1; + err = 0; + } else if (err) { printf("\nAuth Verify case %d failed", cur_td->test_idx); err = -1; } else { @@ -1308,17 +1405,19 @@ test_inline_macsec_verify_only_all(const void *data __rte_unused) } all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); - return all_err; + return skipped > 0 ? 
TEST_SKIPPED : all_err; } static int -test_inline_macsec_encap_decap_all(const void *data __rte_unused) +test_inline_macsec_encap_decap_all(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1334,7 +1433,11 @@ test_inline_macsec_encap_decap_all(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_cipher_vectors[i]; err = test_macsec(&cur_td, MCS_ENCAP_DECAP, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("Cipher Auth Encap-decap case %d skipped\n", cur_td->test_idx); + skipped += 1; + err = 0; + } else if (err) { printf("\nCipher Auth Encap-decap case %d failed", cur_td->test_idx); err = -1; } else { @@ -1343,18 +1446,20 @@ test_inline_macsec_encap_decap_all(const void *data __rte_unused) } all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); - return all_err; + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_auth_verify_all(const void *data __rte_unused) +test_inline_macsec_auth_verify_all(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1369,7 +1474,11 @@ test_inline_macsec_auth_verify_all(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_integrity_vectors[i]; err = test_macsec(&cur_td, MCS_AUTH_VERIFY, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("Auth Generate + Verify case %d skipped\n", cur_td->test_idx); + skipped += 1; + err = 0; + } else if (err) { printf("\nAuth Generate + Verify case %d failed", cur_td->test_idx); err = -1; } else { @@ -1378,13 +1487,14 @@ test_inline_macsec_auth_verify_all(const void *data __rte_unused) } all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); - return all_err; + return skipped > 0 ? 
TEST_SKIPPED : all_err; } static int -test_inline_macsec_multi_flow(const void *data __rte_unused) +test_inline_macsec_multi_flow(void) { const struct mcs_test_vector *tv[MCS_MAX_FLOWS]; struct mcs_test_vector iter[MCS_MAX_FLOWS]; @@ -1418,7 +1528,9 @@ test_inline_macsec_multi_flow(const void *data __rte_unused) tv[i] = (const struct mcs_test_vector *)&iter[i]; } err = test_macsec(tv, MCS_ENCAP_DECAP, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("Cipher Auth Encryption multi flow skipped\n"); + } else if (err) { printf("\nCipher Auth Encryption multi flow failed"); err = -1; } else { @@ -1429,11 +1541,12 @@ test_inline_macsec_multi_flow(const void *data __rte_unused) } static int -test_inline_macsec_with_vlan(const void *data __rte_unused) +test_inline_macsec_with_vlan(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1456,7 +1569,11 @@ test_inline_macsec_with_vlan(const void *data __rte_unused) opts.nb_vlan = 2; } err = test_macsec(&cur_td, MCS_ENCAP, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("VLAN Encap case %d skipped", cur_td->test_idx); + skipped += 1; + err = 0; + } else if (err) { printf("\n VLAN Encap case %d failed", cur_td->test_idx); err = -1; } else { @@ -1477,7 +1594,11 @@ test_inline_macsec_with_vlan(const void *data __rte_unused) opts.nb_vlan = 2; } err = test_macsec(&cur_td, MCS_DECAP, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("VLAN Decap case %d skipped", cur_td->test_idx); + skipped += 1; + err = 0; + } else if (err) { printf("\n VLAN Decap case %d failed", cur_td->test_idx); err = -1; } else { @@ -1487,16 +1608,18 @@ test_inline_macsec_with_vlan(const void *data __rte_unused) all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, (2 * size) + all_err, -all_err); - return all_err; + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + 2 * size + all_err - skipped, -all_err, skipped); + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_pkt_drop(const void *data __rte_unused) +test_inline_macsec_pkt_drop(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1512,7 +1635,11 @@ test_inline_macsec_pkt_drop(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_err_cipher_vectors[i]; err = test_macsec(&cur_td, MCS_DECAP, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("Packet drop case %d skipped", cur_td->test_idx); + skipped += 1; + err = 0; + } else if (err) { printf("\nPacket drop case %d passed", cur_td->test_idx); err = 0; } else { @@ -1521,17 +1648,19 @@ test_inline_macsec_pkt_drop(const void *data __rte_unused) } all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); - return all_err; + return skipped > 0 ? 
TEST_SKIPPED : all_err; } static int -test_inline_macsec_untagged_rx(const void *data __rte_unused) +test_inline_macsec_untagged_rx(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1547,7 +1676,10 @@ test_inline_macsec_untagged_rx(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_untagged_cipher_vectors[i]; err = test_macsec(&cur_td, MCS_DECAP, &opts); - if (err) + if (err == TEST_SKIPPED) { + skipped += 1; + err = 0; + } else if (err) err = 0; else err = -1; @@ -1559,24 +1691,29 @@ test_inline_macsec_untagged_rx(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_untagged_cipher_vectors[i]; err = test_macsec(&cur_td, MCS_DECAP, &opts); - if (err) + if (err == TEST_SKIPPED) { + skipped += 1; + err = 0; + } else if (err) err = 0; else err = -1; all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + 2 * size + all_err - skipped, -all_err, skipped); - return all_err; + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_bad_tag_rx(const void *data __rte_unused) +test_inline_macsec_bad_tag_rx(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1592,7 +1729,10 @@ test_inline_macsec_bad_tag_rx(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_bad_tag_vectors[i]; err = test_macsec(&cur_td, MCS_DECAP, &opts); - if (err) + if (err == TEST_SKIPPED) { + skipped += 1; + err = 0; + } else if (err) err = -1; else err = 0; @@ -1600,17 +1740,19 @@ test_inline_macsec_bad_tag_rx(const void *data __rte_unused) all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); - return all_err; + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_sa_not_in_use(const void *data __rte_unused) +test_inline_macsec_sa_not_in_use(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1626,7 +1768,10 @@ test_inline_macsec_sa_not_in_use(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_cipher_vectors[i]; err = test_macsec(&cur_td, MCS_DECAP, &opts); - if (err) + if (err == TEST_SKIPPED) { + skipped += 1; + err = 0; + } else if (err) err = -1; else err = 0; @@ -1634,17 +1779,19 @@ test_inline_macsec_sa_not_in_use(const void *data __rte_unused) all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); - return all_err; + return skipped > 0 ? 
TEST_SKIPPED : all_err; } static int -test_inline_macsec_decap_stats(const void *data __rte_unused) +test_inline_macsec_decap_stats(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1660,7 +1807,11 @@ test_inline_macsec_decap_stats(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_cipher_vectors[i]; err = test_macsec(&cur_td, MCS_DECAP, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("Decap stats case %d skipped\n", cur_td->test_idx); + skipped += 1; + err = 0; + } else if (err) { printf("\nDecap stats case %d failed", cur_td->test_idx); err = -1; } else { @@ -1669,17 +1820,19 @@ test_inline_macsec_decap_stats(const void *data __rte_unused) } all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); - return all_err; + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_verify_only_stats(const void *data __rte_unused) +test_inline_macsec_verify_only_stats(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1695,7 +1848,11 @@ test_inline_macsec_verify_only_stats(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_integrity_vectors[i]; err = test_macsec(&cur_td, MCS_VERIFY_ONLY, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("Verify only stats case %d skipped\n", cur_td->test_idx); + skipped += 1; + err = 0; + } else if (err) { printf("\nVerify only stats case %d failed", cur_td->test_idx); err = -1; } else { @@ -1704,17 +1861,19 @@ test_inline_macsec_verify_only_stats(const void *data __rte_unused) } all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); - return all_err; + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_pkts_invalid_stats(const void *data __rte_unused) +test_inline_macsec_pkts_invalid_stats(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1729,23 +1888,28 @@ test_inline_macsec_pkts_invalid_stats(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_err_cipher_vectors[i]; err = test_macsec(&cur_td, MCS_DECAP, &opts); - if (err) + if (err == TEST_SKIPPED) { + skipped += 1; + err = 0; + } else if (err) err = 0; else err = -1; all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); - return all_err; + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); + return skipped > 0 ? 
TEST_SKIPPED : all_err; } static int -test_inline_macsec_pkts_unchecked_stats(const void *data __rte_unused) +test_inline_macsec_pkts_unchecked_stats(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_DISABLE; @@ -1761,7 +1925,10 @@ test_inline_macsec_pkts_unchecked_stats(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_integrity_vectors[i]; err = test_macsec(&cur_td, MCS_VERIFY_ONLY, &opts); - if (err) + if (err == TEST_SKIPPED) { + skipped += 1; + err = 0; + } else if (err) err = -1; else err = 0; @@ -1769,16 +1936,18 @@ test_inline_macsec_pkts_unchecked_stats(const void *data __rte_unused) all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); - return all_err; + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_out_pkts_untagged(const void *data __rte_unused) +test_inline_macsec_out_pkts_untagged(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1794,7 +1963,10 @@ test_inline_macsec_out_pkts_untagged(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_cipher_vectors[i]; err = test_macsec(&cur_td, MCS_ENCAP, &opts); - if (err) + if (err == TEST_SKIPPED) { + skipped += 1; + err = 0; + } else if (err) err = -1; else err = 0; @@ -1802,16 +1974,18 @@ test_inline_macsec_out_pkts_untagged(const void *data __rte_unused) all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); - return all_err; + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_out_pkts_toolong(const void *data __rte_unused) +test_inline_macsec_out_pkts_toolong(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_NO_DISCARD; @@ -1827,7 +2001,10 @@ test_inline_macsec_out_pkts_toolong(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_cipher_vectors[i]; err = test_macsec(&cur_td, MCS_ENCAP, &opts); - if (err) + if (err == TEST_SKIPPED) { + skipped += 1; + err = 0; + } else if (err) err = -1; else err = 0; @@ -1835,16 +2012,18 @@ test_inline_macsec_out_pkts_toolong(const void *data __rte_unused) all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); - return all_err; + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); + return skipped > 0 ? 
TEST_SKIPPED : all_err; } static int -test_inline_macsec_encap_stats(const void *data __rte_unused) +test_inline_macsec_encap_stats(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1860,23 +2039,29 @@ test_inline_macsec_encap_stats(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_cipher_vectors[i]; err = test_macsec(&cur_td, MCS_ENCAP, &opts); - if (err) + if (err == TEST_SKIPPED) { + skipped += 1; + err = 0; + } else if (err) err = -1; else err = 0; + all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); - return all_err; + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_auth_only_stats(const void *data __rte_unused) +test_inline_macsec_auth_only_stats(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1892,23 +2077,29 @@ test_inline_macsec_auth_only_stats(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_integrity_vectors[i]; err = test_macsec(&cur_td, MCS_AUTH_ONLY, &opts); - if (err) + if (err == TEST_SKIPPED) { + skipped += 1; + err = 0; + } else if (err) err = -1; else err = 0; + all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); - return all_err; + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_interrupts_all(const void *data __rte_unused) +test_inline_macsec_interrupts_all(void) { struct mcs_err_vector err_vector = {0}; const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; + int skipped = 0; int i, size; int err, all_err = 0; enum rte_eth_event_macsec_subtype subtype[] = { @@ -1937,8 +2128,13 @@ test_inline_macsec_interrupts_all(const void *data __rte_unused) for (i = 0; i < size; i++) { cur_td = &list_mcs_intr_test_vectors[i]; err = test_macsec(&cur_td, MCS_DECAP, &opts); - if ((err_vector.event == RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR) && - (err_vector.event_subtype == subtype[i])) { + if (err == TEST_SKIPPED) { + printf("Sectag val err interrupt test case %d skipped", + cur_td->test_idx); + skipped += 1; + err = 0; + } else if ((err_vector.event == RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR) && + (err_vector.event_subtype == subtype[i])) { printf("\nSectag val err interrupt test case %d passed", cur_td->test_idx); err = 0; @@ -1952,16 +2148,18 @@ test_inline_macsec_interrupts_all(const void *data __rte_unused) rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_MACSEC, test_macsec_event_callback, &err_vector); - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); - return all_err; + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size + all_err - skipped, -all_err, skipped); + return skipped > 0 ? 
TEST_SKIPPED : all_err; } static int -test_inline_macsec_rekey_tx(const void *data __rte_unused) +test_inline_macsec_rekey_tx(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -1979,7 +2177,11 @@ test_inline_macsec_rekey_tx(const void *data __rte_unused) cur_td = &list_mcs_rekey_vectors[i]; opts.rekey_td = &list_mcs_rekey_vectors[++i]; err = test_macsec(&cur_td, MCS_ENCAP, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("Tx hw rekey test case %d skipped\n", i); + skipped += 1; + err = 0; + } else if (err) { printf("Tx hw rekey test case %d failed\n", i); err = -1; } else { @@ -1989,16 +2191,18 @@ test_inline_macsec_rekey_tx(const void *data __rte_unused) all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); - return all_err; + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size / 2 + all_err - skipped, -all_err, skipped); + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_rekey_rx(const void *data __rte_unused) +test_inline_macsec_rekey_rx(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; int err, all_err = 0; + int skipped = 0; int i, size; opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT; @@ -2014,7 +2218,11 @@ test_inline_macsec_rekey_rx(const void *data __rte_unused) cur_td = &list_mcs_rekey_vectors[i]; opts.rekey_td = &list_mcs_rekey_vectors[++i]; err = test_macsec(&cur_td, MCS_DECAP, &opts); - if (err) { + if (err == TEST_SKIPPED) { + printf("Rx rekey test case %d skipped\n", i); + skipped += 1; + err = 0; + } else if (err) { printf("Rx rekey test case %d failed\n", i); err = -1; } else { @@ -2024,17 +2232,19 @@ test_inline_macsec_rekey_rx(const void *data __rte_unused) all_err += err; } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); - return all_err; + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size / 2 + all_err - skipped, -all_err, skipped); + return skipped > 0 ? TEST_SKIPPED : all_err; } static int -test_inline_macsec_anti_replay(const void *data __rte_unused) +test_inline_macsec_anti_replay(void) { const struct mcs_test_vector *cur_td; struct mcs_test_opts opts = {0}; uint16_t replay_win_sz[2] = {32, 0}; int err, all_err = 0; + int skipped = 0; int i, size; int j; @@ -2055,21 +2265,30 @@ test_inline_macsec_anti_replay(const void *data __rte_unused) opts.ar_td[1] = &list_mcs_anti_replay_vectors[++i]; opts.ar_td[2] = &list_mcs_anti_replay_vectors[++i]; err = test_macsec(&cur_td, MCS_DECAP, &opts); - if (err) { - printf("Replay window: %u, Anti replay test case %d failed\n", - opts.replay_win_sz, i); + if (err == TEST_SKIPPED) { + printf("Replay window: %u, Anti replay test " + "case %d skipped\n", opts.replay_win_sz, + i); + skipped += 1; + err = 0; + } else if (err) { + printf("Replay window: %u, Anti replay test " + "case %d failed\n", opts.replay_win_sz, + i); err = -1; } else { - printf("Replay window: %u, Anti replay test case %d passed\n", - opts.replay_win_sz, i); + printf("Replay window: %u, Anti replay test " + "case %d passed\n", opts.replay_win_sz, + i); err = 0; } all_err += err; } } - printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err); - return all_err; + printf("\n%s: Success: %d, Failure: %d, Skipped: %d\n", __func__, + size / 2 + all_err - skipped, -all_err, skipped); + return skipped > 0 ? 
TEST_SKIPPED : all_err; } static int diff --git a/app/test/test_security_inline_proto.c b/app/test/test_security_inline_proto.c index 45aa742c6b6..78a2064b656 100644 --- a/app/test/test_security_inline_proto.c +++ b/app/test/test_security_inline_proto.c @@ -136,7 +136,7 @@ static struct rte_flow *default_flow[RTE_MAX_ETHPORTS]; /* Create Inline IPsec session */ static int create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid, - void **sess, struct rte_security_ctx **ctx, + void **sess, void **ctx, uint32_t *ol_flags, const struct ipsec_test_flags *flags, struct rte_security_session_conf *sess_conf) { @@ -149,7 +149,7 @@ create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid, struct rte_security_capability_idx sec_cap_idx; const struct rte_security_capability *sec_cap; enum rte_security_ipsec_sa_direction dir; - struct rte_security_ctx *sec_ctx; + void *sec_ctx; uint32_t verify; sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL; @@ -221,7 +221,7 @@ create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid, sess_conf->userdata = (void *) sa; - sec_ctx = (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(portid); + sec_ctx = rte_eth_dev_get_sec_ctx(portid); if (sec_ctx == NULL) { printf("Ethernet device doesn't support security features.\n"); return TEST_SKIPPED; @@ -503,7 +503,7 @@ init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len, bool static int init_mempools(unsigned int nb_mbuf) { - struct rte_security_ctx *sec_ctx; + void *sec_ctx; uint16_t nb_sess = 512; uint32_t sess_sz; char s[64]; @@ -784,6 +784,51 @@ event_rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts_to_rx) return nb_rx; } +static int +verify_inbound_oop(struct ipsec_test_data *td, + bool silent, struct rte_mbuf *mbuf) +{ + int ret = TEST_SUCCESS, rc; + struct rte_mbuf *orig; + uint32_t len; + void *data; + + orig = *rte_security_oop_dynfield(mbuf); + if (!orig) { + if (!silent) + printf("\nUnable to get orig buffer OOP session"); + return TEST_FAILED; + } + + /* Skip Ethernet header comparison */ + rte_pktmbuf_adj(orig, RTE_ETHER_HDR_LEN); + + len = td->input_text.len; + if (orig->pkt_len != len) { + if (!silent) + printf("\nOriginal packet length mismatch, expected %u, got %u ", + len, orig->pkt_len); + ret = TEST_FAILED; + } + + data = rte_pktmbuf_mtod(orig, void *); + rc = memcmp(data, td->input_text.data, len); + if (rc) { + ret = TEST_FAILED; + if (silent) + goto exit; + + printf("TestCase %s line %d: %s\n", __func__, __LINE__, + "output text not as expected\n"); + + rte_hexdump(stdout, "expected", td->input_text.data, len); + rte_hexdump(stdout, "actual", data, len); + } +exit: + rte_pktmbuf_free(orig); + return ret; +} + static int test_ipsec_with_reassembly(struct reassembly_vector *vector, const struct ipsec_test_flags *flags) @@ -801,7 +846,7 @@ test_ipsec_with_reassembly(struct reassembly_vector *vector, struct rte_crypto_sym_xform auth_in = {0}; struct rte_crypto_sym_xform aead_in = {0}; struct ipsec_test_data sa_data; - struct rte_security_ctx *ctx; + void *ctx; unsigned int i, nb_rx = 0, j; uint32_t ol_flags; bool outer_ipv4; @@ -1068,7 +1113,7 @@ test_ipsec_inline_proto_process(struct ipsec_test_data *td, struct rte_crypto_sym_xform auth = {0}; struct rte_crypto_sym_xform aead = {0}; struct sa_expiry_vector vector = {0}; - struct rte_security_ctx *ctx; + void *ctx; int nb_rx = 0, nb_sent; uint32_t ol_flags; int i, j = 0, ret; @@ -1107,6 +1152,12 @@ test_ipsec_inline_proto_process(struct ipsec_test_data *td, if (ret) 
return ret; + if (flags->inb_oop && rte_security_oop_dynfield_offset < 0) { + printf("\nDynamic field not available for inline inbound OOP"); + ret = TEST_FAILED; + goto out; + } + if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { ret = create_default_flow(port_id); if (ret) @@ -1198,6 +1249,15 @@ test_ipsec_inline_proto_process(struct ipsec_test_data *td, goto out; } + if (flags->inb_oop) { + ret = verify_inbound_oop(td, silent, rx_pkts_burst[i]); + if (ret != TEST_SUCCESS) { + for ( ; i < nb_rx; i++) + rte_pktmbuf_free(rx_pkts_burst[i]); + goto out; + } + } + rte_pktmbuf_free(rx_pkts_burst[i]); rx_pkts_burst[i] = NULL; } @@ -1338,7 +1398,7 @@ test_ipsec_inline_proto_process_with_esn(struct ipsec_test_data td[], struct rte_mbuf *tx_pkt = NULL; int nb_rx, nb_sent; void *ses; - struct rte_security_ctx *ctx; + void *ctx; uint32_t ol_flags; bool outer_ipv4; int i, ret; @@ -2076,7 +2136,27 @@ test_ipsec_inline_proto_known_vec_inb(const void *test_data) } static int -test_ipsec_inline_proto_display_list(const void *data __rte_unused) +test_ipsec_inline_proto_oop_inb(const void *test_data) +{ + const struct ipsec_test_data *td = test_data; + struct ipsec_test_flags flags; + struct ipsec_test_data td_inb; + + memset(&flags, 0, sizeof(flags)); + flags.inb_oop = true; + + if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) + test_ipsec_td_in_from_out(td, &td_inb); + else + memcpy(&td_inb, td, sizeof(td_inb)); + + td_inb.ipsec_xform.options.ingress_oop = true; + + return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags); +} + +static int +test_ipsec_inline_proto_display_list(void) { struct ipsec_test_flags flags; @@ -2089,7 +2169,7 @@ test_ipsec_inline_proto_display_list(const void *data __rte_unused) } static int -test_ipsec_inline_proto_udp_encap(const void *data __rte_unused) +test_ipsec_inline_proto_udp_encap(void) { struct ipsec_test_flags flags; @@ -2102,7 +2182,7 @@ test_ipsec_inline_proto_udp_encap(const void *data __rte_unused) } static int -test_ipsec_inline_proto_udp_ports_verify(const void *data __rte_unused) +test_ipsec_inline_proto_udp_ports_verify(void) { struct ipsec_test_flags flags; @@ -2116,7 +2196,7 @@ test_ipsec_inline_proto_udp_ports_verify(const void *data __rte_unused) } static int -test_ipsec_inline_proto_err_icv_corrupt(const void *data __rte_unused) +test_ipsec_inline_proto_err_icv_corrupt(void) { struct ipsec_test_flags flags; @@ -2129,7 +2209,7 @@ test_ipsec_inline_proto_err_icv_corrupt(const void *data __rte_unused) } static int -test_ipsec_inline_proto_tunnel_dst_addr_verify(const void *data __rte_unused) +test_ipsec_inline_proto_tunnel_dst_addr_verify(void) { struct ipsec_test_flags flags; @@ -2142,7 +2222,7 @@ test_ipsec_inline_proto_tunnel_dst_addr_verify(const void *data __rte_unused) } static int -test_ipsec_inline_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused) +test_ipsec_inline_proto_tunnel_src_dst_addr_verify(void) { struct ipsec_test_flags flags; @@ -2155,7 +2235,7 @@ test_ipsec_inline_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused } static int -test_ipsec_inline_proto_inner_ip_csum(const void *data __rte_unused) +test_ipsec_inline_proto_inner_ip_csum(void) { struct ipsec_test_flags flags; @@ -2168,7 +2248,7 @@ test_ipsec_inline_proto_inner_ip_csum(const void *data __rte_unused) } static int -test_ipsec_inline_proto_inner_l4_csum(const void *data __rte_unused) +test_ipsec_inline_proto_inner_l4_csum(void) { struct ipsec_test_flags flags; @@ -2181,7 +2261,7 @@ 
test_ipsec_inline_proto_inner_l4_csum(const void *data __rte_unused) } static int -test_ipsec_inline_proto_tunnel_v4_in_v4(const void *data __rte_unused) +test_ipsec_inline_proto_tunnel_v4_in_v4(void) { struct ipsec_test_flags flags; @@ -2195,7 +2275,7 @@ test_ipsec_inline_proto_tunnel_v4_in_v4(const void *data __rte_unused) } static int -test_ipsec_inline_proto_tunnel_v6_in_v6(const void *data __rte_unused) +test_ipsec_inline_proto_tunnel_v6_in_v6(void) { struct ipsec_test_flags flags; @@ -2209,7 +2289,7 @@ test_ipsec_inline_proto_tunnel_v6_in_v6(const void *data __rte_unused) } static int -test_ipsec_inline_proto_tunnel_v4_in_v6(const void *data __rte_unused) +test_ipsec_inline_proto_tunnel_v4_in_v6(void) { struct ipsec_test_flags flags; @@ -2223,7 +2303,7 @@ test_ipsec_inline_proto_tunnel_v4_in_v6(const void *data __rte_unused) } static int -test_ipsec_inline_proto_tunnel_v6_in_v4(const void *data __rte_unused) +test_ipsec_inline_proto_tunnel_v6_in_v4(void) { struct ipsec_test_flags flags; @@ -2237,7 +2317,7 @@ test_ipsec_inline_proto_tunnel_v6_in_v4(const void *data __rte_unused) } static int -test_ipsec_inline_proto_transport_v4(const void *data __rte_unused) +test_ipsec_inline_proto_transport_v4(void) { struct ipsec_test_flags flags; @@ -2251,7 +2331,7 @@ test_ipsec_inline_proto_transport_v4(const void *data __rte_unused) } static int -test_ipsec_inline_proto_transport_l4_csum(const void *data __rte_unused) +test_ipsec_inline_proto_transport_l4_csum(void) { struct ipsec_test_flags flags = { .l4_csum = true, @@ -2263,7 +2343,7 @@ test_ipsec_inline_proto_transport_l4_csum(const void *data __rte_unused) } static int -test_ipsec_inline_proto_stats(const void *data __rte_unused) +test_ipsec_inline_proto_stats(void) { struct ipsec_test_flags flags; @@ -2276,7 +2356,7 @@ test_ipsec_inline_proto_stats(const void *data __rte_unused) } static int -test_ipsec_inline_proto_pkt_fragment(const void *data __rte_unused) +test_ipsec_inline_proto_pkt_fragment(void) { struct ipsec_test_flags flags; @@ -2290,7 +2370,7 @@ test_ipsec_inline_proto_pkt_fragment(const void *data __rte_unused) } static int -test_ipsec_inline_proto_copy_df_inner_0(const void *data __rte_unused) +test_ipsec_inline_proto_copy_df_inner_0(void) { struct ipsec_test_flags flags; @@ -2303,7 +2383,7 @@ test_ipsec_inline_proto_copy_df_inner_0(const void *data __rte_unused) } static int -test_ipsec_inline_proto_copy_df_inner_1(const void *data __rte_unused) +test_ipsec_inline_proto_copy_df_inner_1(void) { struct ipsec_test_flags flags; @@ -2316,7 +2396,7 @@ test_ipsec_inline_proto_copy_df_inner_1(const void *data __rte_unused) } static int -test_ipsec_inline_proto_set_df_0_inner_1(const void *data __rte_unused) +test_ipsec_inline_proto_set_df_0_inner_1(void) { struct ipsec_test_flags flags; @@ -2329,7 +2409,7 @@ test_ipsec_inline_proto_set_df_0_inner_1(const void *data __rte_unused) } static int -test_ipsec_inline_proto_set_df_1_inner_0(const void *data __rte_unused) +test_ipsec_inline_proto_set_df_1_inner_0(void) { struct ipsec_test_flags flags; @@ -2342,7 +2422,7 @@ test_ipsec_inline_proto_set_df_1_inner_0(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused) +test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(void) { struct ipsec_test_flags flags; @@ -2355,7 +2435,7 @@ test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused) 
+test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(void) { struct ipsec_test_flags flags; @@ -2368,7 +2448,7 @@ test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused) +test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(void) { struct ipsec_test_flags flags; @@ -2381,7 +2461,7 @@ test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused) +test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(void) { struct ipsec_test_flags flags; @@ -2394,7 +2474,7 @@ test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused) +test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(void) { struct ipsec_test_flags flags; @@ -2409,7 +2489,7 @@ test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused) +test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(void) { struct ipsec_test_flags flags; @@ -2424,7 +2504,7 @@ test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused) +test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(void) { struct ipsec_test_flags flags; @@ -2439,7 +2519,7 @@ test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused) +test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(void) { struct ipsec_test_flags flags; @@ -2454,7 +2534,7 @@ test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(const void *data __rte_unused) +test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(void) { struct ipsec_test_flags flags; @@ -2468,7 +2548,7 @@ test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(const void *data __rte_unused) +test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(void) { struct ipsec_test_flags flags; @@ -2482,7 +2562,7 @@ test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(const void *data __rte_unused) +test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(void) { struct ipsec_test_flags flags; @@ -2496,7 +2576,7 @@ test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(const void *data __rte_unused) +test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(void) { struct ipsec_test_flags flags; @@ -2510,7 +2590,7 @@ test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv4_ttl_decrement(const void *data __rte_unused) +test_ipsec_inline_proto_ipv4_ttl_decrement(void) { struct ipsec_test_flags flags = { .dec_ttl_or_hop_limit = true, @@ -2521,7 +2601,7 @@ test_ipsec_inline_proto_ipv4_ttl_decrement(const void *data __rte_unused) } static int -test_ipsec_inline_proto_ipv6_hop_limit_decrement(const void *data __rte_unused) +test_ipsec_inline_proto_ipv6_hop_limit_decrement(void) { struct ipsec_test_flags flags = { .ipv6 = true, @@ -2533,7 +2613,7 @@ 
test_ipsec_inline_proto_ipv6_hop_limit_decrement(const void *data __rte_unused) } static int -test_ipsec_inline_proto_iv_gen(const void *data __rte_unused) +test_ipsec_inline_proto_iv_gen(void) { struct ipsec_test_flags flags; @@ -2546,7 +2626,7 @@ test_ipsec_inline_proto_iv_gen(const void *data __rte_unused) } static int -test_ipsec_inline_proto_sa_pkt_soft_expiry(const void *data __rte_unused) +test_ipsec_inline_proto_sa_pkt_soft_expiry(void) { struct ipsec_test_flags flags = { .sa_expiry_pkts_soft = true, @@ -2555,7 +2635,7 @@ test_ipsec_inline_proto_sa_pkt_soft_expiry(const void *data __rte_unused) return test_ipsec_inline_proto_all(&flags); } static int -test_ipsec_inline_proto_sa_byte_soft_expiry(const void *data __rte_unused) +test_ipsec_inline_proto_sa_byte_soft_expiry(void) { struct ipsec_test_flags flags = { .sa_expiry_bytes_soft = true, @@ -2565,7 +2645,7 @@ test_ipsec_inline_proto_sa_byte_soft_expiry(const void *data __rte_unused) } static int -test_ipsec_inline_proto_sa_pkt_hard_expiry(const void *data __rte_unused) +test_ipsec_inline_proto_sa_pkt_hard_expiry(void) { struct ipsec_test_flags flags = { .sa_expiry_pkts_hard = true @@ -2575,7 +2655,7 @@ test_ipsec_inline_proto_sa_pkt_hard_expiry(const void *data __rte_unused) } static int -test_ipsec_inline_proto_sa_byte_hard_expiry(const void *data __rte_unused) +test_ipsec_inline_proto_sa_byte_hard_expiry(void) { struct ipsec_test_flags flags = { .sa_expiry_bytes_hard = true @@ -3165,6 +3245,11 @@ static struct unit_test_suite inline_ipsec_testsuite = { "IPv4 Reassembly with burst of 4 fragments", ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly, test_inline_ip_reassembly, &ipv4_4frag_burst_vector), + TEST_CASE_NAMED_WITH_DATA( + "Inbound Out-Of-Place processing", + ut_setup_inline_ipsec, ut_teardown_inline_ipsec, + test_ipsec_inline_proto_oop_inb, + &pkt_aes_128_gcm), TEST_CASES_END() /**< NULL terminate unit test array */ }, diff --git a/app/test/test_security_inline_proto_vectors.h b/app/test/test_security_inline_proto_vectors.h index 61a045b446e..3ac75588a31 100644 --- a/app/test/test_security_inline_proto_vectors.h +++ b/app/test/test_security_inline_proto_vectors.h @@ -88,7 +88,7 @@ struct ip_reassembly_test_packet pkt_ipv6_udp_p1 = { .l4_offset = 40, .data = { /* IP */ - 0x60, 0x00, 0x00, 0x00, 0x05, 0xb4, 0x2C, 0x40, + 0x60, 0x00, 0x00, 0x00, 0x05, 0xb4, 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -135,7 +135,7 @@ struct ip_reassembly_test_packet pkt_ipv6_udp_p2 = { .l4_offset = 40, .data = { /* IP */ - 0x60, 0x00, 0x00, 0x00, 0x11, 0x5a, 0x2c, 0x40, + 0x60, 0x00, 0x00, 0x00, 0x11, 0x5a, 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, diff --git a/app/test/test_seqlock.c b/app/test/test_seqlock.c index d26d2c010eb..873bd604534 100644 --- a/app/test/test_seqlock.c +++ b/app/test/test_seqlock.c @@ -187,4 +187,4 @@ test_seqlock(void) return rc; } -REGISTER_TEST_COMMAND(seqlock_autotest, test_seqlock); +REGISTER_FAST_TEST(seqlock_autotest, true, true, test_seqlock); diff --git a/app/test/test_service_cores.c b/app/test/test_service_cores.c index 422d2a83e85..c12d52d8f16 100644 --- a/app/test/test_service_cores.c +++ b/app/test/test_service_cores.c @@ -1040,7 +1040,7 @@ test_service_common(void) return unit_test_suite_runner(&service_tests); } -REGISTER_TEST_COMMAND(service_autotest, 
test_service_common); +REGISTER_FAST_TEST(service_autotest, true, true, test_service_common); static struct unit_test_suite service_perf_tests = { .suite_name = "service core performance test suite", @@ -1062,4 +1062,4 @@ test_service_perf(void) return unit_test_suite_runner(&service_perf_tests); } -REGISTER_TEST_COMMAND(service_perf_autotest, test_service_perf); +REGISTER_PERF_TEST(service_perf_autotest, test_service_perf); diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c index 3f59372300c..9a481f27180 100644 --- a/app/test/test_spinlock.c +++ b/app/test/test_spinlock.c @@ -302,4 +302,4 @@ test_spinlock(void) return ret; } -REGISTER_TEST_COMMAND(spinlock_autotest, test_spinlock); +REGISTER_FAST_TEST(spinlock_autotest, true, true, test_spinlock); diff --git a/app/test/test_stack.c b/app/test/test_stack.c index bc389614333..9150cc9fed1 100644 --- a/app/test/test_stack.c +++ b/app/test/test_stack.c @@ -379,5 +379,5 @@ test_lf_stack(void) #endif } -REGISTER_TEST_COMMAND(stack_autotest, test_stack); -REGISTER_TEST_COMMAND(stack_lf_autotest, test_lf_stack); +REGISTER_FAST_TEST(stack_autotest, false, true, test_stack); +REGISTER_FAST_TEST(stack_lf_autotest, false, true, test_lf_stack); diff --git a/app/test/test_stack_perf.c b/app/test/test_stack_perf.c index 1eae00a334a..c5e1caa0364 100644 --- a/app/test/test_stack_perf.c +++ b/app/test/test_stack_perf.c @@ -354,5 +354,5 @@ test_lf_stack_perf(void) #endif } -REGISTER_TEST_COMMAND(stack_perf_autotest, test_stack_perf); -REGISTER_TEST_COMMAND(stack_lf_perf_autotest, test_lf_stack_perf); +REGISTER_PERF_TEST(stack_perf_autotest, test_stack_perf); +REGISTER_PERF_TEST(stack_lf_perf_autotest, test_lf_stack_perf); diff --git a/app/test/test_string_fns.c b/app/test/test_string_fns.c index 5e105d2bb98..ad41106df10 100644 --- a/app/test/test_string_fns.c +++ b/app/test/test_string_fns.c @@ -182,4 +182,4 @@ test_string_fns(void) return 0; } -REGISTER_TEST_COMMAND(string_autotest, test_string_fns); +REGISTER_FAST_TEST(string_autotest, true, true, test_string_fns); diff --git a/app/test/test_table.c b/app/test/test_table.c index d100666e1ce..27d2407517b 100644 --- a/app/test/test_table.c +++ b/app/test/test_table.c @@ -206,4 +206,4 @@ test_table(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(table_autotest, test_table); +REGISTER_FAST_TEST(table_autotest, true, true, test_table); diff --git a/app/test/test_tailq.c b/app/test/test_tailq.c index 9520219b0ad..2ff28773443 100644 --- a/app/test/test_tailq.c +++ b/app/test/test_tailq.c @@ -125,4 +125,4 @@ test_tailq(void) return ret; } -REGISTER_TEST_COMMAND(tailq_autotest, test_tailq); +REGISTER_FAST_TEST(tailq_autotest, true, true, test_tailq); diff --git a/app/test/test_telemetry_data.c b/app/test/test_telemetry_data.c index a960f44c1b7..59898ff5e98 100644 --- a/app/test/test_telemetry_data.c +++ b/app/test/test_telemetry_data.c @@ -2,11 +2,20 @@ * Copyright 2020 Intel Corporation */ +#ifdef RTE_EXEC_ENV_WINDOWS +#include "test.h" + +static int +telemetry_data_autotest(void) +{ + return TEST_SKIPPED; +} + +#else + #include #include -#ifndef RTE_EXEC_ENV_WINDOWS #include -#endif #include #include @@ -604,5 +613,6 @@ telemetry_data_autotest(void) close(sock); return 0; } +#endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(telemetry_data_autotest, telemetry_data_autotest); +REGISTER_FAST_TEST(telemetry_data_autotest, true, true, telemetry_data_autotest); diff --git a/app/test/test_telemetry_json.c b/app/test/test_telemetry_json.c index 5617eac5406..758e89303f5 100644 --- 
a/app/test/test_telemetry_json.c +++ b/app/test/test_telemetry_json.c @@ -213,4 +213,4 @@ test_telemetry_json(void) return 0; } -REGISTER_TEST_COMMAND(telemetry_json_autotest, test_telemetry_json); +REGISTER_FAST_TEST(telemetry_json_autotest, true, true, test_telemetry_json); diff --git a/app/test/test_thash.c b/app/test/test_thash.c index 53d9611e187..65d42fd9008 100644 --- a/app/test/test_thash.c +++ b/app/test/test_thash.c @@ -966,4 +966,4 @@ test_thash(void) return unit_test_suite_runner(&thash_tests); } -REGISTER_TEST_COMMAND(thash_autotest, test_thash); +REGISTER_FAST_TEST(thash_autotest, true, true, test_thash); diff --git a/app/test/test_thash_perf.c b/app/test/test_thash_perf.c index 687582aa322..9dfd5d3c21f 100644 --- a/app/test/test_thash_perf.c +++ b/app/test/test_thash_perf.c @@ -135,4 +135,4 @@ test_thash_perf(void) return 0; } -REGISTER_TEST_COMMAND(thash_perf_autotest, test_thash_perf); +REGISTER_PERF_TEST(thash_perf_autotest, test_thash_perf); diff --git a/app/test/test_threads.c b/app/test/test_threads.c index a4c4f651a44..4ac3f2671a6 100644 --- a/app/test/test_threads.c +++ b/app/test/test_threads.c @@ -239,8 +239,8 @@ test_thread_control_create_join(void) rte_thread_t thread_main_id; thread_id_ready = 0; - RTE_TEST_ASSERT(rte_thread_create_control(&thread_id, "test_control_threads", - NULL, thread_main, &thread_main_id) == 0, + RTE_TEST_ASSERT(rte_thread_create_control(&thread_id, "dpdk-test-thcc", + thread_main, &thread_main_id) == 0, "Failed to create thread."); while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0) @@ -279,4 +279,4 @@ test_threads(void) return unit_test_suite_runner(&threads_test_suite); } -REGISTER_TEST_COMMAND(threads_autotest, test_threads); +REGISTER_FAST_TEST(threads_autotest, true, true, test_threads); diff --git a/app/test/test_ticketlock.c b/app/test/test_ticketlock.c index 242c136478b..1fbbedb33b5 100644 --- a/app/test/test_ticketlock.c +++ b/app/test/test_ticketlock.c @@ -314,4 +314,4 @@ test_ticketlock(void) return ret; } -REGISTER_TEST_COMMAND(ticketlock_autotest, test_ticketlock); +REGISTER_FAST_TEST(ticketlock_autotest, true, true, test_ticketlock); diff --git a/app/test/test_timer.c b/app/test/test_timer.c index 0c36dc9010b..cac8fc01149 100644 --- a/app/test/test_timer.c +++ b/app/test/test_timer.c @@ -594,4 +594,4 @@ test_timer(void) return TEST_SUCCESS; } -REGISTER_TEST_COMMAND(timer_autotest, test_timer); +REGISTER_FAST_TEST(timer_autotest, false, true, test_timer); diff --git a/app/test/test_timer_perf.c b/app/test/test_timer_perf.c index 0ede4b3e406..d2d74ebbc67 100644 --- a/app/test/test_timer_perf.c +++ b/app/test/test_timer_perf.c @@ -131,4 +131,4 @@ test_timer_perf(void) return 0; } -REGISTER_TEST_COMMAND(timer_perf_autotest, test_timer_perf); +REGISTER_PERF_TEST(timer_perf_autotest, test_timer_perf); diff --git a/app/test/test_timer_racecond.c b/app/test/test_timer_racecond.c index bb56ae8324f..6f8b448ff8d 100644 --- a/app/test/test_timer_racecond.c +++ b/app/test/test_timer_racecond.c @@ -172,4 +172,4 @@ test_timer_racecond(void) return TEST_SUCCESS; } -REGISTER_TEST_COMMAND(timer_racecond_autotest, test_timer_racecond); +REGISTER_PERF_TEST(timer_racecond_autotest, test_timer_racecond); diff --git a/app/test/test_trace.c b/app/test/test_trace.c index ad4a394a294..00809f433b1 100644 --- a/app/test/test_trace.c +++ b/app/test/test_trace.c @@ -250,4 +250,4 @@ test_trace(void) #endif /* !RTE_EXEC_ENV_WINDOWS */ -REGISTER_TEST_COMMAND(trace_autotest, test_trace); +REGISTER_FAST_TEST(trace_autotest, true, true, 
test_trace); diff --git a/app/test/test_trace_perf.c b/app/test/test_trace_perf.c index 46ae7d8074a..a6dd0757227 100644 --- a/app/test/test_trace_perf.c +++ b/app/test/test_trace_perf.c @@ -179,4 +179,4 @@ test_trace_perf(void) return TEST_SUCCESS; } -REGISTER_TEST_COMMAND(trace_perf_autotest, test_trace_perf); +REGISTER_PERF_TEST(trace_perf_autotest, test_trace_perf); diff --git a/app/test/test_vdev.c b/app/test/test_vdev.c index 9f0e6c4b991..3e262f30bc6 100644 --- a/app/test/test_vdev.c +++ b/app/test/test_vdev.c @@ -166,4 +166,4 @@ test_vdev(void) return 0; } -REGISTER_TEST_COMMAND(vdev_autotest, test_vdev); +REGISTER_FAST_TEST(vdev_autotest, true, true, test_vdev); diff --git a/app/test/test_version.c b/app/test/test_version.c index 1e1ff186567..52f269fb9cb 100644 --- a/app/test/test_version.c +++ b/app/test/test_version.c @@ -25,4 +25,4 @@ test_version(void) return 0; } -REGISTER_TEST_COMMAND(version_autotest, test_version); +REGISTER_FAST_TEST(version_autotest, true, true, test_version); diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c index 2b55c85fd88..b7d74a467a2 100644 --- a/app/test/virtual_pmd.c +++ b/app/test/virtual_pmd.c @@ -164,17 +164,17 @@ virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused, } static int -virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev, +virtual_ethdev_link_update_success(struct rte_eth_dev *bonding_eth_dev, int wait_to_complete __rte_unused) { - if (!bonded_eth_dev->data->dev_started) - bonded_eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; + if (!bonding_eth_dev->data->dev_started) + bonding_eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; return 0; } static int -virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused, +virtual_ethdev_link_update_fail(struct rte_eth_dev *bonding_eth_dev __rte_unused, int wait_to_complete __rte_unused) { return -1; diff --git a/buildtools/get-test-suites.py b/buildtools/get-test-suites.py new file mode 100644 index 00000000000..574c233aa87 --- /dev/null +++ b/buildtools/get-test-suites.py @@ -0,0 +1,39 @@ +#! /usr/bin/env python3 +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2023 Intel Corporation + +import sys +import re + +input_list = sys.argv[1:] +test_def_regex = re.compile("REGISTER_([A-Z]+)_TEST\s*\(\s*([a-z0-9_]+)") +test_suites = {} +# track tests not in any test suite. 
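For reference, the registration style this script scans for is the one used throughout the test sources in this patch; a hypothetical test would register itself as below (the two booleans of REGISTER_FAST_TEST are the per-test flags that get_fast_test_params() turns into the ":nohuge:asan" suffix):

    /* hypothetical unit test, shown only to illustrate the macros */
    #include "test.h"

    static int
    test_example(void)
    {
            return TEST_SUCCESS;
    }

    /* fast-tests entry: command name, no-huge flag, ASan flag, handler */
    REGISTER_FAST_TEST(example_autotest, true, true, test_example);

    /* perf-tests entry: command name and handler only */
    REGISTER_PERF_TEST(example_perf_autotest, test_example);

With these two lines present, the script would list the test under fast-tests (as example_autotest:true:true) and under perf-tests respectively.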
+non_suite_regex = re.compile("REGISTER_TEST_COMMAND\s*\(\s*([a-z0-9_]+)") +non_suite_tests = [] + +def get_fast_test_params(test_name, ln): + "Extract the extra fast-test parameters from the line" + (_, rest_of_line) = ln.split(test_name, 1) + (_, nohuge, asan, _func) = rest_of_line.split(',', 3) + return f":{nohuge.strip().lower()}:{asan.strip().lower()}" + +for fname in input_list: + with open(fname) as f: + contents = [ln.strip() for ln in f.readlines()] + test_lines = [ln for ln in contents if test_def_regex.match(ln)] + non_suite_tests.extend([non_suite_regex.match(ln).group(1) + for ln in contents if non_suite_regex.match(ln)]) + for ln in test_lines: + (test_suite, test_name) = test_def_regex.match(ln).group(1, 2) + suite_name = f"{test_suite.lower()}-tests" + if suite_name in test_suites: + test_suites[suite_name].append(test_name) + else: + test_suites[suite_name] = [test_name] + if suite_name == "fast-tests": + test_suites["fast-tests"][-1] += get_fast_test_params(test_name, ln) + +for suite in test_suites.keys(): + print(f"{suite}={','.join(test_suites[suite])}") +print(f"non_suite_tests={','.join(non_suite_tests)}") diff --git a/app/test/has_hugepage.py b/buildtools/has-hugepages.py similarity index 100% rename from app/test/has_hugepage.py rename to buildtools/has-hugepages.py diff --git a/buildtools/meson.build b/buildtools/meson.build index e1c600e40f9..948ac17dd20 100644 --- a/buildtools/meson.build +++ b/buildtools/meson.build @@ -4,7 +4,6 @@ pkgconf = find_program('pkg-config', 'pkgconf', required: false) check_symbols = find_program('check-symbols.sh') ldflags_ibverbs_static = find_program('options-ibverbs-static.sh') -objdump = find_program('objdump', 'llvm-objdump') python3 = import('python').find_installation(required: false) if python3.found() @@ -18,8 +17,8 @@ map_to_win_cmd = py3 + files('map_to_win.py') sphinx_wrapper = py3 + files('call-sphinx-build.py') get_cpu_count_cmd = py3 + files('get-cpu-count.py') get_numa_count_cmd = py3 + files('get-numa-count.py') -binutils_avx512_check = (py3 + files('binutils-avx512-check.py') + - [objdump] + cc.cmd_array()) +get_test_suites_cmd = py3 + files('get-test-suites.py') +has_hugepages_cmd = py3 + files('has-hugepages.py') # select library and object file format pmdinfo = py3 + files('gen-pmdinfo-cfile.py') + [meson.current_build_dir()] diff --git a/config/meson.build b/config/meson.build index d8223718e44..d56b0f9bce7 100644 --- a/config/meson.build +++ b/config/meson.build @@ -14,9 +14,34 @@ foreach env:supported_exec_envs set_variable('is_' + env, exec_env == env) endforeach +exec_envs = {'freebsd': 0, 'linux': 1, 'windows': 2} +foreach env, id:exec_envs + dpdk_conf.set('RTE_ENV_' + env.to_upper(), id) + dpdk_conf.set10('RTE_EXEC_ENV_IS_' + env.to_upper(), (exec_env == env)) +endforeach +dpdk_conf.set('RTE_EXEC_ENV', exec_envs[exec_env]) +dpdk_conf.set('RTE_EXEC_ENV_' + exec_env.to_upper(), 1) + # MS linker requires special treatment. 
# TODO: use cc.get_linker_id() with Meson >= 0.54 -is_ms_linker = is_windows and (cc.get_id() == 'clang') +is_ms_compiler = is_windows and (cc.get_id() == 'msvc') +is_ms_linker = is_windows and (cc.get_id() == 'clang' or is_ms_compiler) + +if is_ms_compiler + # force the use of intrinsics the MSVC compiler (except x86) + # does not support inline assembly + dpdk_conf.set('RTE_FORCE_INTRINSICS', 1) + + # suppress warnings raised for using standard library functions + # the MSVC compiler regards as unsafe but are used by DPDK + dpdk_conf.set('_CRT_SECURE_NO_WARNINGS', 1) + + # enable non-locking atomic operations + add_project_arguments('/experimental:c11atomics', language: 'c') + + # enable typeof operator + add_project_arguments('/d1experimental:typeof', language: 'c') +endif # set the major version, which might be used by drivers and libraries # depending on the configuration options @@ -129,12 +154,14 @@ endif dpdk_conf.set('RTE_MACHINE', cpu_instruction_set) machine_args = [] -# ppc64 does not support -march= at all, use -mcpu and -mtune for that -if host_machine.cpu_family().startswith('ppc') - machine_args += '-mcpu=' + cpu_instruction_set - machine_args += '-mtune=' + cpu_instruction_set -else - machine_args += '-march=' + cpu_instruction_set +if not is_ms_compiler + # ppc64 does not support -march= at all, use -mcpu and -mtune for that + if host_machine.cpu_family().startswith('ppc') + machine_args += '-mcpu=' + cpu_instruction_set + machine_args += '-mtune=' + cpu_instruction_set + else + machine_args += '-march=' + cpu_instruction_set + endif endif toolchain = cc.get_id() @@ -253,7 +280,11 @@ if cc.get_id() == 'clang' and dpdk_conf.get('RTE_ARCH_64') == false endif # add -include rte_config to cflags -add_project_arguments('-include', 'rte_config.h', language: 'c') +if is_ms_compiler + add_project_arguments('/FI', 'rte_config.h', language: 'c') +else + add_project_arguments('-include', 'rte_config.h', language: 'c') +endif # enable extra warnings and disable any unwanted warnings # -Wall is added by default at warning level 1, and -Wextra @@ -303,6 +334,7 @@ endforeach # set other values pulled from the build options dpdk_conf.set('RTE_MAX_ETHPORTS', get_option('max_ethports')) dpdk_conf.set('RTE_LIBEAL_USE_HPET', get_option('use_hpet')) +dpdk_conf.set('RTE_ENABLE_STDATOMIC', get_option('enable_stdatomic')) dpdk_conf.set('RTE_ENABLE_TRACE_FP', get_option('enable_trace_fp')) # values which have defaults which may be overridden dpdk_conf.set('RTE_MAX_VFIO_GROUPS', 64) diff --git a/config/rte_config.h b/config/rte_config.h index 400e44e3cf3..da265d7dd24 100644 --- a/config/rte_config.h +++ b/config/rte_config.h @@ -28,6 +28,7 @@ /****** library defines ********/ /* EAL defines */ +#define RTE_CACHE_GUARD_LINES 1 #define RTE_MAX_HEAPS 32 #define RTE_MAX_MEMSEG_LISTS 128 #define RTE_MAX_MEMSEG_PER_LIST 8192 @@ -73,10 +74,12 @@ #define RTE_EVENT_MAX_DEVS 16 #define RTE_EVENT_MAX_PORTS_PER_DEV 255 #define RTE_EVENT_MAX_QUEUES_PER_DEV 255 +#define RTE_EVENT_MAX_PROFILES_PER_PORT 8 #define RTE_EVENT_TIMER_ADAPTER_NUM_MAX 32 #define RTE_EVENT_ETH_INTR_RING_SIZE 1024 #define RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE 32 #define RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE 32 +#define RTE_EVENT_DMA_ADAPTER_MAX_INSTANCE 32 /* rawdev defines */ #define RTE_RAWDEV_MAX_DEVS 64 @@ -87,6 +90,7 @@ /* rte_power defines */ #define RTE_MAX_LCORE_FREQS 64 +#define RTE_MAX_UNCORE_FREQS 64 /* rte_graph defines */ #define RTE_GRAPH_BURST_SIZE 256 diff --git a/buildtools/binutils-avx512-check.py 
b/config/x86/binutils-avx512-check.py similarity index 100% rename from buildtools/binutils-avx512-check.py rename to config/x86/binutils-avx512-check.py diff --git a/config/x86/meson.build b/config/x86/meson.build index 54345c4da37..d8ef50fb401 100644 --- a/config/x86/meson.build +++ b/config/x86/meson.build @@ -4,6 +4,9 @@ # get binutils version for the workaround of Bug 97 binutils_ok = true if is_linux or cc.get_id() == 'gcc' + objdump = find_program('objdump', 'llvm-objdump') + binutils_avx512_check = (py3 + files('binutils-avx512-check.py') + + [objdump] + cc.cmd_array()) binutils_ok = run_command(binutils_avx512_check, check: false).returncode() == 0 if not binutils_ok and cc.has_argument('-mno-avx512f') machine_args += '-mno-avx512f' @@ -24,10 +27,18 @@ if cc.has_argument('-mavx512f') endif endif -# we require SSE4.2 for DPDK -if cc.get_define('__SSE4_2__', args: machine_args) == '' - message('SSE 4.2 not enabled by default, explicitly enabling') - machine_args += '-msse4' +if not is_ms_compiler + # we require SSE4.2 for DPDK + if cc.get_define('__SSE4_2__', args: machine_args) == '' + message('SSE 4.2 not enabled by default, explicitly enabling') + machine_args += '-msse4' + endif +endif + +# enable restricted transactional memory intrinsics +# https://gcc.gnu.org/onlinedocs/gcc/x86-transactional-memory-intrinsics.html +if cc.get_id() != 'msvc' + machine_args += '-mrtm' endif base_flags = ['SSE', 'SSE2', 'SSE3','SSSE3', 'SSE4_1', 'SSE4_2'] diff --git a/devtools/checkpatches.sh b/devtools/checkpatches.sh index 43f5e36a185..066449d147e 100755 --- a/devtools/checkpatches.sh +++ b/devtools/checkpatches.sh @@ -102,20 +102,28 @@ check_forbidden_additions() { # -f $(dirname $(readlink -f $0))/check-forbidden-tokens.awk \ "$1" || res=1 - # refrain from using compiler __atomic_thread_fence() + # refrain from using compiler __rte_atomic_thread_fence() # It should be avoided on x86 for SMP case. 
awk -v FOLDERS="lib drivers app examples" \ - -v EXPRESSIONS="__atomic_thread_fence\\\(" \ + -v EXPRESSIONS="__rte_atomic_thread_fence\\\(" \ -v RET_ON_FAIL=1 \ - -v MESSAGE='Using __atomic_thread_fence' \ + -v MESSAGE='Using __rte_atomic_thread_fence, prefer rte_atomic_thread_fence' \ -f $(dirname $(readlink -f $0))/check-forbidden-tokens.awk \ "$1" || res=1 - # refrain from using compiler __atomic_{add,and,nand,or,sub,xor}_fetch() + # refrain from using compiler __atomic_xxx builtins awk -v FOLDERS="lib drivers app examples" \ - -v EXPRESSIONS="__atomic_(add|and|nand|or|sub|xor)_fetch\\\(" \ + -v EXPRESSIONS="__atomic_.*\\\( __ATOMIC_(RELAXED|CONSUME|ACQUIRE|RELEASE|ACQ_REL|SEQ_CST)" \ -v RET_ON_FAIL=1 \ - -v MESSAGE='Using __atomic_op_fetch, prefer __atomic_fetch_op' \ + -v MESSAGE='Using __atomic_xxx/__ATOMIC_XXX built-ins, prefer rte_atomic_xxx/rte_memory_order_xxx' \ + -f $(dirname $(readlink -f $0))/check-forbidden-tokens.awk \ + "$1" || res=1 + + # refrain from using some pthread functions + awk -v FOLDERS="lib drivers app examples" \ + -v EXPRESSIONS="pthread_(create|join|detach|set(_?name_np|affinity_np)|attr_set(inheritsched|schedpolicy))\\\(" \ + -v RET_ON_FAIL=1 \ + -v MESSAGE='Using pthread functions, prefer rte_thread' \ -f $(dirname $(readlink -f $0))/check-forbidden-tokens.awk \ "$1" || res=1 @@ -127,6 +135,22 @@ check_forbidden_additions() { # -f $(dirname $(readlink -f $0))/check-forbidden-tokens.awk \ "$1" || res=1 + # forbid use of non abstracted bit count operations + awk -v FOLDERS="lib drivers app examples" \ + -v EXPRESSIONS='\\<__builtin_(clz|clzll|ctz|ctzll|popcount|popcountll)\\>' \ + -v RET_ON_FAIL=1 \ + -v MESSAGE='Using __builtin helpers for bit count operations' \ + -f $(dirname $(readlink -f $0))/check-forbidden-tokens.awk \ + "$1" || res=1 + + # forbid inclusion of Linux header for PCI constants + awk -v FOLDERS="lib drivers app examples" \ + -v EXPRESSIONS='include.*linux/pci_regs\\.h' \ + -v RET_ON_FAIL=1 \ + -v MESSAGE='Using linux/pci_regs.h, prefer rte_pci.h' \ + -f $(dirname $(readlink -f $0))/check-forbidden-tokens.awk \ + "$1" || res=1 + # forbid use of experimental build flag except in examples awk -v FOLDERS='lib drivers app' \ -v EXPRESSIONS='-DALLOW_EXPERIMENTAL_API allow_experimental_apis' \ @@ -143,6 +167,14 @@ check_forbidden_additions() { # -f $(dirname $(readlink -f $0))/check-forbidden-tokens.awk \ "$1" || res=1 + # forbid non-internal thread in drivers and libs + awk -v FOLDERS='lib drivers' \ + -v EXPRESSIONS="rte_thread_(set_name|create_control)\\\(" \ + -v RET_ON_FAIL=1 \ + -v MESSAGE='Prefer rte_thread_(set_prefixed_name|create_internal_control)' \ + -f $(dirname $(readlink -f $0))/check-forbidden-tokens.awk \ + "$1" || res=1 + # forbid inclusion of driver specific headers in apps and examples awk -v FOLDERS='app examples' \ -v EXPRESSIONS='include.*_driver\\.h include.*_pmd\\.h' \ @@ -151,6 +183,14 @@ check_forbidden_additions() { # -f $(dirname $(readlink -f $0))/check-forbidden-tokens.awk \ "$1" || res=1 + # prevent addition of tests not in one of our test suites + awk -v FOLDERS='app/test' \ + -v EXPRESSIONS='REGISTER_TEST_COMMAND' \ + -v RET_ON_FAIL=1 \ + -v MESSAGE='Using REGISTER_TEST_COMMAND instead of REGISTER__TEST' \ + -f $(dirname $(readlink -f $0))/check-forbidden-tokens.awk \ + "$1" || res=1 + # SVG must be included with wildcard extension to allow conversion awk -v FOLDERS='doc' \ -v EXPRESSIONS='::[[:space:]]*[^[:space:]]*\\.svg' \ diff --git a/devtools/dts-check-format.sh b/devtools/dts-check-format.sh index 
c9b3702642e..3f43e17e885 100755 --- a/devtools/dts-check-format.sh +++ b/devtools/dts-check-format.sh @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2022 University of New Hampshire +# Copyright(c) 2023 PANTHEON.tech s.r.o. usage() { echo "Usage: $(basename $0) [options] [directory]" @@ -11,9 +12,10 @@ usage() { format=true lint=true +typecheck=true # Comments after args serve as documentation; must be present -while getopts "hfl" arg; do +while getopts "hflt" arg; do case $arg in h) # Display this message echo 'Run formatting and linting programs for DTS.' @@ -26,6 +28,9 @@ while getopts "hfl" arg; do l) # Don't run linter lint=false ;; + t) # Don't run type checker + typecheck=false + ;; ?) usage exit 1 @@ -93,6 +98,20 @@ if $lint; then fi fi +if $typecheck; then + if $format || $lint; then + echo + fi + heading "Checking types in $directory/" + if command -v mypy > /dev/null; then + mypy . + errors=$((errors + $?)) + else + echo "mypy not found, unable to check types" + errors=$((errors + 1)) + fi +fi + echo heading "Summary for $directory/" echo "Found $errors errors" diff --git a/devtools/libabigail.abignore b/devtools/libabigail.abignore index 3ff51509de3..325f34e0b6a 100644 --- a/devtools/libabigail.abignore +++ b/devtools/libabigail.abignore @@ -37,6 +37,10 @@ type_kind = enum changed_enumerators = RTE_CRYPTO_ASYM_XFORM_ECPM, RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END +; Ignore changes to bbdev FFT API which is experimental +[suppress_type] + name = rte_bbdev_fft_op + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Temporary exceptions till next major ABI version ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; diff --git a/devtools/test-meson-builds.sh b/devtools/test-meson-builds.sh index 84b907d2ea1..5c07063cbdc 100755 --- a/devtools/test-meson-builds.sh +++ b/devtools/test-meson-builds.sh @@ -120,10 +120,10 @@ config () # return fi options= - # deprecated libs may be disabled by default, so for complete builds ensure - # no libs are disabled - if ! echo $* | grep -q -- 'disable_libs' ; then - options="$options -Ddisable_libs=" + # deprecated libs are disabled by default, so for complete builds + # enable them + if ! 
echo $* | grep -q -- 'enable_deprecated_libs' ; then + options="$options -Denable_deprecated_libs=*" fi if echo $* | grep -qw -- '--default-library=shared' ; then options="$options -Dexamples=all" @@ -227,11 +227,13 @@ for c in gcc clang ; do for s in static shared ; do if [ $s = shared ] ; then abicheck=ABI + stdatomic=-Denable_stdatomic=true else abicheck=skipABI # save time and disk space + stdatomic=-Denable_stdatomic=false fi export CC="$CCACHE $c" - build build-$c-$s $c $abicheck --default-library=$s + build build-$c-$s $c $abicheck $stdatomic --default-library=$s unset CC done done @@ -282,6 +284,9 @@ build build-loongarch64-generic-gcc $f ABI $use_shared # IBM POWER f=$srcdir/config/ppc/ppc64le-power8-linux-gcc +if grep -q 'NAME="Ubuntu"' /etc/os-release ; then + f=$f-ubuntu +fi build build-ppc64-power8-gcc $f ABI $use_shared # generic RISC-V diff --git a/devtools/words-case.txt b/devtools/words-case.txt index 0f005796af8..0200741ed86 100644 --- a/devtools/words-case.txt +++ b/devtools/words-case.txt @@ -54,6 +54,7 @@ MPU MSI MSI-X MSS +MSVC MTU NEON Netlink diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md index fdeda139329..a6a768bd7c6 100644 --- a/doc/api/doxy-api-index.md +++ b/doc/api/doxy-api-index.md @@ -29,6 +29,7 @@ The public API headers are grouped by topics: [event_eth_tx_adapter](@ref rte_event_eth_tx_adapter.h), [event_timer_adapter](@ref rte_event_timer_adapter.h), [event_crypto_adapter](@ref rte_event_crypto_adapter.h), + [event_dma_adapter](@ref rte_event_dma_adapter.h), [rawdev](@ref rte_rawdev.h), [metrics](@ref rte_metrics.h), [bitrate](@ref rte_bitrate.h), @@ -48,6 +49,7 @@ The public API headers are grouped by topics: [iavf](@ref rte_pmd_iavf.h), [bnxt](@ref rte_pmd_bnxt.h), [cnxk](@ref rte_pmd_cnxk.h), + [cnxk_eventdev](@ref rte_pmd_cnxk_eventdev.h), [cnxk_mempool](@ref rte_pmd_cnxk_mempool.h), [dpaa](@ref rte_pmd_dpaa.h), [dpaa2](@ref rte_pmd_dpaa2.h), @@ -115,9 +117,11 @@ The public API headers are grouped by topics: [IPsec SAD](@ref rte_ipsec_sad.h), [IP](@ref rte_ip.h), [frag/reass](@ref rte_ip_frag.h), + [UDP](@ref rte_udp.h), [SCTP](@ref rte_sctp.h), [TCP](@ref rte_tcp.h), - [UDP](@ref rte_udp.h), + [TLS](@ref rte_tls.h), + [DTLS](@ref rte_dtls.h), [GTP](@ref rte_gtp.h), [GRO](@ref rte_gro.h), [GSO](@ref rte_gso.h), @@ -155,6 +159,7 @@ The public API headers are grouped by topics: - **classification** [reorder](@ref rte_reorder.h), + [dispatcher](@ref rte_dispatcher.h), [distributor](@ref rte_distributor.h), [EFD](@ref rte_efd.h), [ACL](@ref rte_acl.h), @@ -206,7 +211,8 @@ The public API headers are grouped by topics: * graph_nodes: [eth_node](@ref rte_node_eth_api.h), [ip4_node](@ref rte_node_ip4_api.h), - [ip6_node](@ref rte_node_ip6_api.h) + [ip6_node](@ref rte_node_ip6_api.h), + [udp4_input_node](@ref rte_node_udp4_input_api.h) - **basic**: [bitops](@ref rte_bitops.h), diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in index a88accd9073..e94c9e4e46c 100644 --- a/doc/api/doxy-api.conf.in +++ b/doc/api/doxy-api.conf.in @@ -9,6 +9,7 @@ INPUT = @TOPDIR@/doc/api/doxy-api-index.md \ @TOPDIR@/drivers/crypto/scheduler \ @TOPDIR@/drivers/dma/dpaa2 \ @TOPDIR@/drivers/event/dlb2 \ + @TOPDIR@/drivers/event/cnxk \ @TOPDIR@/drivers/mempool/cnxk \ @TOPDIR@/drivers/mempool/dpaa2 \ @TOPDIR@/drivers/net/ark \ @@ -34,6 +35,7 @@ INPUT = @TOPDIR@/doc/api/doxy-api-index.md \ @TOPDIR@/lib/cmdline \ @TOPDIR@/lib/compressdev \ @TOPDIR@/lib/cryptodev \ + @TOPDIR@/lib/dispatcher \ @TOPDIR@/lib/distributor \ @TOPDIR@/lib/dmadev \ @TOPDIR@/lib/efd 
\ @@ -84,6 +86,7 @@ INPUT += @API_EXAMPLES@ FILE_PATTERNS = rte_*.h \ cmdline.h PREDEFINED = __DOXYGEN__ \ + RTE_ATOMIC \ RTE_HAS_CPUSET \ VFIO_PRESENT \ __rte_lockable= \ @@ -123,11 +126,13 @@ EXAMPLE_PATTERNS = *.c EXAMPLE_RECURSIVE = YES OUTPUT_DIRECTORY = @OUTPUT@ +FULL_PATH_NAMES = @FULL_PATH_NAMES@ STRIP_FROM_PATH = @STRIP_FROM_PATH@ -GENERATE_HTML = YES -HTML_OUTPUT = @HTML_OUTPUT@ +GENERATE_HTML = @GENERATE_HTML@ +HTML_OUTPUT = html GENERATE_LATEX = NO -GENERATE_MAN = NO +GENERATE_MAN = @GENERATE_MAN@ +MAN_LINKS = YES HAVE_DOT = NO diff --git a/doc/api/generate_doxygen.py b/doc/api/generate_doxygen.py index d3a22869f61..c704f130188 100755 --- a/doc/api/generate_doxygen.py +++ b/doc/api/generate_doxygen.py @@ -7,7 +7,7 @@ pattern = re.compile('^Preprocessing (.*)...$') out_dir, *doxygen_command = sys.argv[1:] -out_file = os.path.join(os.path.dirname(out_dir), 'doxygen.out') +out_file = os.path.join(out_dir + '.out') dep_file = f'{out_dir}.d' with open(out_file, 'w') as out: subprocess.run(doxygen_command, check=True, stdout=out) diff --git a/doc/api/meson.build b/doc/api/meson.build index 2876a78a7e0..5b50692df9e 100644 --- a/doc/api/meson.build +++ b/doc/api/meson.build @@ -29,11 +29,11 @@ example = custom_target('examples.dox', install_dir: htmldir, build_by_default: get_option('enable_docs')) +# set up common Doxygen configuration cdata = configuration_data() cdata.set('VERSION', meson.project_version()) cdata.set('API_EXAMPLES', join_paths(dpdk_build_root, 'doc', 'api', 'examples.dox')) cdata.set('OUTPUT', join_paths(dpdk_build_root, 'doc', 'api')) -cdata.set('HTML_OUTPUT', 'html') cdata.set('TOPDIR', dpdk_source_root) cdata.set('STRIP_FROM_PATH', ' '.join([dpdk_source_root, join_paths(dpdk_build_root, 'doc', 'api')])) cdata.set('WARN_AS_ERROR', 'NO') @@ -41,14 +41,35 @@ if get_option('werror') cdata.set('WARN_AS_ERROR', 'YES') endif -doxy_conf = configure_file(input: 'doxy-api.conf.in', - output: 'doxy-api.conf', - configuration: cdata) +# configure HTML Doxygen run +html_cdata = configuration_data() +html_cdata.merge_from(cdata) +html_cdata.set('GENERATE_HTML', 'YES') +html_cdata.set('GENERATE_MAN', 'NO') +html_cdata.set('FULL_PATH_NAMES', 'YES') -doxy_build = custom_target('doxygen', +doxy_html_conf = configure_file(input: 'doxy-api.conf.in', + output: 'doxy-api-html.conf', + configuration: html_cdata) + +# configure manpage Doxygen run +man_cdata = configuration_data() +man_cdata.merge_from(cdata) +man_cdata.set('GENERATE_HTML', 'NO') +man_cdata.set('GENERATE_MAN', 'YES') +# for manpages, have the pages only titled with the header name, +# rather than the full path to the header +man_cdata.set('FULL_PATH_NAMES', 'NO') + +doxy_man_conf = configure_file(input: 'doxy-api.conf.in', + output: 'doxy-api-man.conf', + configuration: man_cdata) + +# do Doxygen runs +doxy_html_build = custom_target('doxygen-html', depends: example, depend_files: 'doxy-api-index.md', - input: doxy_conf, + input: doxy_html_conf, output: 'html', depfile: 'html.d', command: [generate_doxygen, '@OUTPUT@', doxygen, '@INPUT@'], @@ -56,5 +77,26 @@ doxy_build = custom_target('doxygen', install_dir: htmldir, build_by_default: get_option('enable_docs')) -doc_targets += doxy_build -doc_target_names += 'Doxygen_API' +doc_targets += doxy_html_build +doc_target_names += 'Doxygen_API(HTML)' + +doxy_man_build = custom_target('doxygen-man', + depends: example, + depend_files: 'doxy-api-index.md', + input: doxy_man_conf, + output: 'man', + depfile: 'man.d', + command: [generate_doxygen, '@OUTPUT@', doxygen, '@INPUT@'], 
+ install: get_option('enable_docs'), + install_dir: get_option('datadir'), + build_by_default: get_option('enable_docs')) + +doc_targets += doxy_man_build +doc_target_names += 'Doxygen_API(Manpage)' + +# refresh the manpage database on install +# if DPDK manpages are installed to a staging directory, not in MANPATH, this has no effect +mandb = find_program('mandb', required: false) +if mandb.found() and get_option('enable_docs') and meson.version().version_compare('>=0.55.0') + meson.add_install_script(mandb) +endif diff --git a/doc/guides/bbdevs/features/vrb2.ini b/doc/guides/bbdevs/features/vrb2.ini new file mode 100644 index 00000000000..23ca6990b73 --- /dev/null +++ b/doc/guides/bbdevs/features/vrb2.ini @@ -0,0 +1,14 @@ +; +; Supported features of the 'Intel vRAN Boost v2' baseband driver. +; +; Refer to default.ini for the full list of available PMD features. +; +[Features] +Turbo Decoder (4G) = Y +Turbo Encoder (4G) = Y +LDPC Decoder (5G) = Y +LDPC Encoder (5G) = Y +LLR/HARQ Compression = Y +FFT/SRS = Y +External DDR Access = N +HW Accelerated = Y diff --git a/doc/guides/bbdevs/index.rst b/doc/guides/bbdevs/index.rst index 77d4c546649..269157d77f4 100644 --- a/doc/guides/bbdevs/index.rst +++ b/doc/guides/bbdevs/index.rst @@ -15,4 +15,5 @@ Baseband Device Drivers fpga_5gnr_fec acc100 vrb1 + vrb2 la12xx diff --git a/doc/guides/bbdevs/vrb1.rst b/doc/guides/bbdevs/vrb1.rst index 9c48d309643..fdefb20651b 100644 --- a/doc/guides/bbdevs/vrb1.rst +++ b/doc/guides/bbdevs/vrb1.rst @@ -71,11 +71,7 @@ The Intel vRAN Boost v1.0 PMD supports the following bbdev capabilities: - ``RTE_BBDEV_TURBO_EARLY_TERMINATION``: set early termination feature. - ``RTE_BBDEV_TURBO_DEC_SCATTER_GATHER``: supports scatter-gather for input/output data. - ``RTE_BBDEV_TURBO_HALF_ITERATION_EVEN``: set half iteration granularity. - - ``RTE_BBDEV_TURBO_SOFT_OUTPUT``: set the APP LLR soft output. - - ``RTE_BBDEV_TURBO_EQUALIZER``: set the turbo equalizer feature. - - ``RTE_BBDEV_TURBO_SOFT_OUT_SATURATE``: set the soft output saturation. - ``RTE_BBDEV_TURBO_CONTINUE_CRC_MATCH``: set to run an extra odd iteration after CRC match. - - ``RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT``: set if negative APP LLR output supported. - ``RTE_BBDEV_TURBO_MAP_DEC``: supports flexible parallel MAP engine decoding. * For the FFT operation: diff --git a/doc/guides/bbdevs/vrb2.rst b/doc/guides/bbdevs/vrb2.rst new file mode 100644 index 00000000000..d51b9be4f08 --- /dev/null +++ b/doc/guides/bbdevs/vrb2.rst @@ -0,0 +1,206 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2023 Intel Corporation + +.. include:: + +Intel\ |reg| vRAN Boost v2 Poll Mode Driver (PMD) +================================================= + +The Intel\ |reg| vRAN Boost integrated accelerator enables +cost-effective 4G and 5G next-generation virtualized Radio Access Network (vRAN) +solutions. +The Intel vRAN Boost v2.0 (VRB2 in the code) is specifically integrated on the +Intel\ |reg| Xeon\ |reg| Granite Rapids-D Process (GNR-D). + +Features +-------- + +Intel vRAN Boost v2.0 includes a 5G Low Density Parity Check (LDPC) encoder/decoder, +rate match/dematch, Hybrid Automatic Repeat Request (HARQ) with access to DDR +memory for buffer management, a 4G Turbo encoder/decoder, +a Fast Fourier Transform (FFT) block providing DFT/iDFT processing offload +for the 5G Sounding Reference Signal (SRS), a MLD-TS accelerator, a Queue Manager (QMGR), +and a DMA subsystem. +There is no dedicated on-card memory for HARQ, the coherent memory on the CPU side is being used. 
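Applications can check at run time which of these processing blocks a bbdev device actually exposes by walking the capability list the driver advertises; a minimal, device-agnostic sketch (assuming the usual bbdev capability-list termination by RTE_BBDEV_OP_NONE):

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_bbdev.h>

    /* Return true if the device advertises the given operation type,
     * e.g. RTE_BBDEV_OP_LDPC_DEC or RTE_BBDEV_OP_FFT. */
    static bool
    bbdev_supports_op(uint16_t dev_id, enum rte_bbdev_op_type op)
    {
            struct rte_bbdev_info info;
            const struct rte_bbdev_op_cap *cap;

            if (rte_bbdev_info_get(dev_id, &info) != 0)
                    return false;
            for (cap = info.drv.capabilities; cap->type != RTE_BBDEV_OP_NONE; cap++)
                    if (cap->type == op)
                            return true;
            return false;
    }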
+ +These hardware blocks provide the following features exposed by the PMD: + +- LDPC Encode in the Downlink (5GNR) +- LDPC Decode in the Uplink (5GNR) +- Turbo Encode in the Downlink (4G) +- Turbo Decode in the Uplink (4G) +- FFT processing +- MLD-TS processing +- Single Root I/O Virtualization (SR-IOV) with 16 Virtual Functions (VFs) per Physical Function (PF) +- Maximum of 2048 queues per VF +- Message Signaled Interrupts (MSIs) + +The Intel vRAN Boost v2.0 PMD supports the following bbdev capabilities: + +* For the LDPC encode operation: + - ``RTE_BBDEV_LDPC_CRC_24B_ATTACH``: set to attach CRC24B to CB(s). + - ``RTE_BBDEV_LDPC_RATE_MATCH``: if set then do not do Rate Match bypass. + - ``RTE_BBDEV_LDPC_INTERLEAVER_BYPASS``: if set then bypass interleaver. + - ``RTE_BBDEV_LDPC_ENC_SCATTER_GATHER``: supports scatter-gather for input/output data. + - ``RTE_BBDEV_LDPC_ENC_CONCATENATION``: concatenate code blocks with bit granularity. + +* For the LDPC decode operation: + - ``RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK``: check CRC24B from CB(s). + - ``RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP``: drops CRC24B bits appended while decoding. + - ``RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK``: check CRC24A from CB(s). + - ``RTE_BBDEV_LDPC_CRC_TYPE_16_CHECK``: check CRC16 from CB(s). + - ``RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE``: provides an input for HARQ combining. + - ``RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE``: provides an input for HARQ combining. + - ``RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE``: disable early termination. + - ``RTE_BBDEV_LDPC_DEC_SCATTER_GATHER``: supports scatter-gather for input/output data. + - ``RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION``: supports compression of the HARQ input/output. + - ``RTE_BBDEV_LDPC_LLR_COMPRESSION``: supports LLR input compression. + - ``RTE_BBDEV_LDPC_HARQ_4BIT_COMPRESSION``: supports compression of the HARQ input/output. + - ``RTE_BBDEV_LDPC_SOFT_OUT_ENABLE``: set the APP LLR soft output. + - ``RTE_BBDEV_LDPC_SOFT_OUT_RM_BYPASS``: set the APP LLR soft output after rate-matching. + - ``RTE_BBDEV_LDPC_SOFT_OUT_DEINTERLEAVER_BYPASS``: disables the de-interleaver. + +* For the turbo encode operation: + - ``RTE_BBDEV_TURBO_CRC_24B_ATTACH``: set to attach CRC24B to CB(s). + - ``RTE_BBDEV_TURBO_RATE_MATCH``: if set then do not do Rate Match bypass. + - ``RTE_BBDEV_TURBO_ENC_INTERRUPTS``: set for encoder dequeue interrupts. + - ``RTE_BBDEV_TURBO_RV_INDEX_BYPASS``: set to bypass RV index. + - ``RTE_BBDEV_TURBO_ENC_SCATTER_GATHER``: supports scatter-gather for input/output data. + +* For the turbo decode operation: + - ``RTE_BBDEV_TURBO_CRC_TYPE_24B``: check CRC24B from CB(s). + - ``RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE``: perform subblock de-interleave. + - ``RTE_BBDEV_TURBO_DEC_INTERRUPTS``: set for decoder dequeue interrupts. + - ``RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN``: set if negative LLR input is supported. + - ``RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP``: keep CRC24B bits appended while decoding. + - ``RTE_BBDEV_TURBO_DEC_CRC_24B_DROP``: option to drop the code block CRC after decoding. + - ``RTE_BBDEV_TURBO_EARLY_TERMINATION``: set early termination feature. + - ``RTE_BBDEV_TURBO_DEC_SCATTER_GATHER``: supports scatter-gather for input/output data. + - ``RTE_BBDEV_TURBO_HALF_ITERATION_EVEN``: set half iteration granularity. + - ``RTE_BBDEV_TURBO_SOFT_OUTPUT``: set the APP LLR soft output. + - ``RTE_BBDEV_TURBO_EQUALIZER``: set the turbo equalizer feature. + - ``RTE_BBDEV_TURBO_SOFT_OUT_SATURATE``: set the soft output saturation. 
+ - ``RTE_BBDEV_TURBO_CONTINUE_CRC_MATCH``: set to run an extra odd iteration after CRC match. + - ``RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT``: set if negative APP LLR output supported. + - ``RTE_BBDEV_TURBO_MAP_DEC``: supports flexible parallel MAP engine decoding. + +* For the FFT operation: + - ``RTE_BBDEV_FFT_WINDOWING``: flexible windowing capability. + - ``RTE_BBDEV_FFT_CS_ADJUSTMENT``: flexible adjustment of Cyclic Shift time offset. + - ``RTE_BBDEV_FFT_DFT_BYPASS``: set for bypass the DFT and get directly into iDFT input. + - ``RTE_BBDEV_FFT_IDFT_BYPASS``: set for bypass the IDFT and get directly the DFT output. + - ``RTE_BBDEV_FFT_WINDOWING_BYPASS``: set for bypass time domain windowing. + +* For the MLD-TS operation: + - ``RTE_BBDEV_MLDTS_REP``: set to repeat and reuse channel across operations. + +Installation +------------ + +Section 3 of the DPDK manual provides instructions on installing and compiling DPDK. + +DPDK requires hugepages to be configured as detailed in section 2 of the DPDK manual. +The bbdev test application has been tested with a configuration 40 x 1GB hugepages. +The hugepage configuration of a server may be examined using: + +.. code-block:: console + + grep Huge* /proc/meminfo + + +Initialization +-------------- + +When the device first powers up, its PCI Physical Functions (PF) +can be listed through these commands for Intel vRAN Boost v2: + +.. code-block:: console + + sudo lspci -vd8086:57c2 + +The physical and virtual functions are compatible with Linux UIO drivers: +``vfio`` (preferred) and ``igb_uio`` (legacy). +However, in order to work the 5G/4G FEC device first needs to be bound +to one of these Linux drivers through DPDK. + + +Configure the VFs through PF +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The PCI virtual functions must be configured before working or getting assigned +to VMs/Containers. +The configuration involves allocating the number of hardware queues, priorities, +load balance, bandwidth and other settings necessary for the device +to perform FEC functions. + +This configuration needs to be executed at least once after reboot or PCI FLR +and can be achieved by using the functions ``rte_acc_configure()``, +which sets up the parameters defined in the compatible ``rte_acc_conf`` structure. + + +Test Application +---------------- + +The bbdev class is provided with a test application, ``test-bbdev.py`` +and range of test data for testing the functionality of the device, +depending on the device's capabilities. +The test application is located under app/test-bbdev folder +and has the following options: + +.. code-block:: console + + "-p", "--testapp-path": specifies path to the bbdev test app. + "-e", "--eal-params": EAL arguments which are passed to the test app. + "-t", "--timeout": Timeout in seconds (default=300). + "-c", "--test-cases": Defines test cases to run. Run all if not specified. + "-v", "--test-vector": Test vector path. + "-n", "--num-ops": Number of operations to process on device (default=32). + "-b", "--burst-size": Operations enqueue/dequeue burst size (default=32). + "-s", "--snr": SNR in dB used when generating LLRs for bler tests. + "-s", "--iter_max": Number of iterations for LDPC decoder. + "-l", "--num-lcores": Number of lcores to run (default=16). + "-i", "--init-device": Initialise PF device with default values. + + +To execute the test application tool using simple decode or encode data, +type one of the following: + +.. 
code-block:: console + + ./test-bbdev.py -c validation -n 64 -b 1 -v ./ldpc_dec_default.data + ./test-bbdev.py -c validation -n 64 -b 1 -v ./ldpc_enc_default.data + + +The test application ``test-bbdev.py``, supports the ability to configure the +PF device with a default set of values, if the "-i" or "- -init-device" option +is included. The default values are defined in test_bbdev_perf.c. + + +Test Vectors +~~~~~~~~~~~~ + +In addition to the simple LDPC decoder and LDPC encoder tests, +bbdev also provides a range of additional tests under the test_vectors folder, +which may be useful. +The results of these tests will depend on the device capabilities which may +cause some test cases to be skipped, but no failure should be reported. + + +Alternate Baseband Device configuration tool +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +On top of the embedded configuration feature supported in test-bbdev using +"- -init-device" option mentioned above, there is also a tool available +to perform that device configuration using a companion application. +The ``pf_bb_config`` application notably enables then to run bbdev-test +from the VF and not only limited to the PF as captured above. + +See for more details: https://github.com/intel/pf-bb-config + +Specifically for the bbdev Intel vRAN Boost v2 PMD, the command below can be used +(note that ACC200 was used previously to refer to VRB2): + +.. code-block:: console + + pf_bb_config VRB2 -c ./vrb2/vrb2_config_vf_5g.cfg + test-bbdev.py -e="-c 0xff0 -a${VF_PCI_ADDR}" -c validation -n 64 -b 64 -l 1 -v ./ldpc_dec_default.data diff --git a/doc/guides/contributing/documentation.rst b/doc/guides/contributing/documentation.rst index 79616e5610a..4eb62fc36a2 100644 --- a/doc/guides/contributing/documentation.rst +++ b/doc/guides/contributing/documentation.rst @@ -182,7 +182,17 @@ To build the documentation:: See :doc:`../linux_gsg/build_dpdk` for more detail on compiling DPDK with meson. -The output is generated in the directories ``build/doc/html/{api,guides}``. +The output is generated in the directory ``build/doc/``, with: + +* HTML versions of the guide docs, e.g. Getting Started Guides, Programmers Guide, in ``build/doc/guides/html`` +* HTML version of the API documentation in ``build/doc/api/html`` +* Man-page version of the API documentation in ``build/doc/api/man``. + If not installing DPDK system-wise, these pages can be accessed by adding this directory to the ``MANPATH`` environment variable. + For example: + +.. code-block:: console + + export MANPATH=:/path/to/build/doc/api/man .. Note:: diff --git a/doc/guides/cryptodevs/features/aesni_mb.ini b/doc/guides/cryptodevs/features/aesni_mb.ini index e4e965c35a7..8df5fa2c850 100644 --- a/doc/guides/cryptodevs/features/aesni_mb.ini +++ b/doc/guides/cryptodevs/features/aesni_mb.ini @@ -20,6 +20,7 @@ OOP LB In LB Out = Y CPU crypto = Y Symmetric sessionless = Y Non-Byte aligned data = Y +Digest encrypted = Y ; ; Supported crypto algorithms of the 'aesni_mb' crypto driver. diff --git a/doc/guides/cryptodevs/features/cn10k.ini b/doc/guides/cryptodevs/features/cn10k.ini index 55a1226965b..4f542c6038f 100644 --- a/doc/guides/cryptodevs/features/cn10k.ini +++ b/doc/guides/cryptodevs/features/cn10k.ini @@ -17,6 +17,7 @@ Symmetric sessionless = Y RSA PRIV OP KEY EXP = Y RSA PRIV OP KEY QT = Y Digest encrypted = Y +Sym raw data path API = Y Inner checksum = Y ; @@ -103,6 +104,7 @@ Modular Inversion = Diffie-hellman = ECDSA = Y ECPM = Y +SM2 = Y ; ; Supported Operating systems of the 'cn10k' crypto driver. 
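Tying the vRAN Boost initialization notes above together, the one-off PF setup done through ``rte_acc_configure()`` can also be driven from an application; a hedged sketch that only sets a PF-mode flag and leaves the queue-group topology and arbitration fields of ``rte_acc_conf`` at their zero defaults (the ``pf_mode_en`` field and header name are assumptions based on the generic ACC configuration interface, not taken from this patch):

    #include <stdbool.h>
    #include <string.h>
    #include <rte_acc_cfg.h>

    /* Configure the baseband PF once after boot or PCI FLR. */
    static int
    configure_bbdev_pf(const char *dev_name)
    {
            struct rte_acc_conf conf;

            memset(&conf, 0, sizeof(conf));
            conf.pf_mode_en = true; /* queues used from the PF rather than VFs */

            return rte_acc_configure(dev_name, &conf);
    }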
diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini index 6f637fa7e2b..f411d4bab73 100644 --- a/doc/guides/cryptodevs/features/default.ini +++ b/doc/guides/cryptodevs/features/default.ini @@ -34,6 +34,7 @@ Sym raw data path API = Cipher multiple data units = Cipher wrapped key = Inner checksum = +Rx inject = ; ; Supported crypto algorithms of a default crypto driver. diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst index afdfb0bd226..730113da339 100644 --- a/doc/guides/cryptodevs/qat.rst +++ b/doc/guides/cryptodevs/qat.rst @@ -457,6 +457,10 @@ to see the full table) +-----+-----+-----+-----+----------+---------------+---------------+------------+--------+------+--------+--------+ | Yes | No | No | 4 | 401xxx | IDZ/ N/A | qat_401xxx | 4xxx | 4942 | 2 | 4943 | 16 | +-----+-----+-----+-----+----------+---------------+---------------+------------+--------+------+--------+--------+ + | Yes | Yes | Yes | 4 | 402xx | linux/6.4+ | qat_4xxx | 4xxx | 4944 | 2 | 4945 | 16 | + +-----+-----+-----+-----+----------+---------------+---------------+------------+--------+------+--------+--------+ + | Yes | No | No | 4 | 402xx | IDZ/ N/A | qat_4xxx | 4xxx | 4944 | 2 | 4945 | 16 | + +-----+-----+-----+-----+----------+---------------+---------------+------------+--------+------+--------+--------+ * Note: Symmetric mixed crypto algorithms feature on Gen 2 works only with IDZ driver version 4.9.0+ diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst index 1a592332820..cccb8a03044 100644 --- a/doc/guides/eventdevs/cnxk.rst +++ b/doc/guides/eventdevs/cnxk.rst @@ -48,6 +48,7 @@ Features of the OCTEON cnxk SSO PMD are: - HW managed event vectorization on CN10K for packets enqueued from ethdev to eventdev configurable per each Rx queue in Rx adapter. - Event vector transmission via Tx adapter. +- Up to 2 event link profiles. Prerequisites and Compilation procedure --------------------------------------- diff --git a/doc/guides/eventdevs/dlb2.rst b/doc/guides/eventdevs/dlb2.rst index f5bf5757c6c..6a273d6f453 100644 --- a/doc/guides/eventdevs/dlb2.rst +++ b/doc/guides/eventdevs/dlb2.rst @@ -17,8 +17,8 @@ Configuration ------------- The DLB PF PMD is a user-space PMD that uses VFIO to gain direct -device access. To use this operation mode, the PCIe PF device must be bound -to a DPDK-compatible VFIO driver, such as vfio-pci. +device access. To use this operation mode, the PCIe PF device must +be bound to a DPDK-compatible VFIO driver, such as vfio-pci. Eventdev API Notes ------------------ @@ -395,26 +395,6 @@ The depth must be between 32 and 1024, and must be a power of 2. --allow ea:00.0,max_enqueue_depth= -QE Weight -~~~~~~~~~ - -DLB supports advanced scheduling mechanisms, such as CQ weight. -Each load balanced CQ has a configurable work capacity (max 256) -which corresponds to the total QE weight DLB will allow to be enqueued -to that consumer. Every load balanced event/QE carries a weight of 0, 2, 4, -or 8 and DLB will increment a (per CQ) load indicator when it schedules a -QE to that CQ. The weight is also stored in the history list. When a -completion arrives, the weight is popped from the history list and used to -decrement the load indicator. This creates a new scheduling condition - a CQ -whose load is equal to or in excess of capacity is not available for traffic. -Note that the weight may not exceed the maximum CQ depth. - - .. 
code-block:: console - - --allow ea:00.0,cq_weight=all: - --allow ea:00.0,cq_weight=qidA-qidB: - --allow ea:00.0,cq_weight=qid: - Producer Coremask ~~~~~~~~~~~~~~~~~ @@ -450,3 +430,87 @@ won't be used. .. code-block:: console --allow ea:00.0,default_port_allocation= + +QE Weight +~~~~~~~~~ + +DLB supports advanced scheduling mechanisms, such as CQ weight. +Each load balanced CQ has a configurable work capacity (max 256) +which corresponds to the total QE weight DLB will allow to be enqueued +to that consumer. Every load balanced event/QE carries a weight of 0, 2, 4, +or 8 and DLB will increment a (per CQ) load indicator when it schedules a +QE to that CQ. The weight is also stored in the history list. When a +completion arrives, the weight is popped from the history list and used to +decrement the load indicator. This creates a new scheduling condition - a CQ +whose load is equal to or in excess of capacity is not available for traffic. +Note that the weight may not exceed the maximum CQ depth. + +Example command to enable the QE Weight feature: + + .. code-block:: console + + --allow ea:00.0,enable_cq_weight= + +Running Eventdev Applications with DLB Device +--------------------------------------------- + +This section explains how to run eventdev applications +with DLB hardware, and the difference in command-line parameters +needed to switch between a DLB device and a virtual eventdev device such as SW0, +so that users can run applications with or without a DLB device +and compare its performance. + +In order to run eventdev applications, the DLB device must be bound +to a DPDK-compatible VFIO driver, such as vfio-pci. + +Example command to bind the DLB device to the vfio-pci driver: + + .. code-block:: console + + ../usertools/dpdk-devbind.py -b vfio-pci ea:00.0 + +Eventdev applications can be run with or without a DLB device. +The examples below show how to run an eventdev application without and with +a DLB device. Notice that the primary difference between the two examples is +the ``--vdev `` parameter. The first example uses the virtual +eventdev device SW0, while the second runs directly on the DLB device +bound to the VFIO driver. + +Example command to run an eventdev application without a DLB device: + + .. code-block:: console + + sudo /app/dpdk-test-eventdev --vdev=event_sw0 -- \ + --test=order_queue --plcores 1 --wlcores 2,3 + +After binding the DLB device to a supported PCI driver such as vfio-pci, +eventdev applications can be run on the DLB device. + +Example command to run an eventdev application with a DLB device: + + .. code-block:: console + + sudo build/app/dpdk-test-eventdev -- --test=order_queue\ + --plcores=1 --wlcores=2-7 --stlist=o --worker_deq_depth=128\ + --prod_enq_burst_sz=64 --nb_flows=64 --nb_pkts=1000000 + +A particular DLB device can also be picked from the command line by passing + the ``-a`` or ``--allow`` option: + + .. code-block:: console + + sudo build/app/dpdk-test-eventdev --allow ea:00.0 -- --test=order_queue\ + --plcores=1 --wlcores=2-7 --stlist=o --worker_deq_depth=128\ + --prod_enq_burst_sz=64 --nb_flows=64 --nb_pkts=1000000 + +Debugging options +~~~~~~~~~~~~~~~~~ + +To specify the log level for a DLB device, use ``--log-level=dlb,8``. +Example command to run an eventdev application with DLB device logging enabled: + + ..
code-block:: console + + sudo build/app/dpdk-test-eventdev --allow ea:00.0 --log-level=dlb,8 -- --test=order_queue\ + --plcores=1 --wlcores=2-7 --stlist=o --worker_deq_depth=128\ + --prod_enq_burst_sz=64 --nb_flows=64 --nb_pkts=1000000 diff --git a/doc/guides/eventdevs/features/cnxk.ini b/doc/guides/eventdevs/features/cnxk.ini index bee69bf8f48..5d353e36707 100644 --- a/doc/guides/eventdevs/features/cnxk.ini +++ b/doc/guides/eventdevs/features/cnxk.ini @@ -12,7 +12,8 @@ runtime_port_link = Y multiple_queue_port = Y carry_flow_id = Y maintenance_free = Y -runtime_queue_attr = y +runtime_queue_attr = Y +profile_links = Y [Eth Rx adapter Features] internal_port = Y diff --git a/doc/guides/eventdevs/features/default.ini b/doc/guides/eventdevs/features/default.ini index 00360f60c69..e980ae134a0 100644 --- a/doc/guides/eventdevs/features/default.ini +++ b/doc/guides/eventdevs/features/default.ini @@ -18,6 +18,7 @@ multiple_queue_port = carry_flow_id = maintenance_free = runtime_queue_attr = +profile_links = ; ; Features of a default Ethernet Rx adapter. @@ -44,6 +45,14 @@ internal_port_op_fwd = internal_port_qp_ev_bind = session_private_data = +; +; Features of a default DMA adapter. +; +[DMA adapter Features] +internal_port_op_new = +internal_port_op_fwd = +internal_port_vchan_ev_bind = + ; ; Features of a default Timer adapter. ; diff --git a/doc/guides/howto/img/lm_bond_virtio_sriov.svg b/doc/guides/howto/img/lm_bond_virtio_sriov.svg index d913ae01214..cde61cd5bed 100644 --- a/doc/guides/howto/img/lm_bond_virtio_sriov.svg +++ b/doc/guides/howto/img/lm_bond_virtio_sriov.svg @@ -410,7 +410,7 @@ sodipodi:role="line" id="tspan3882" x="-912.34381" - y="274.9668">bonded device withbonding device withbonded device withbonding device with create bonded device 1 0 - Created new bonded device net_bond_testpmd_0 on (port 2). - testpmd> add bonding slave 0 2 - testpmd> add bonding slave 1 2 + testpmd> create bonding device 1 0 + Created new bonding device net_bond_testpmd_0 on (port 2). + testpmd> add bonding member 0 2 + testpmd> add bonding member 1 2 testpmd> show bonding config 2 The syntax of the ``testpmd`` command is: -set bonding primary (slave id) (port id) +set bonding primary (member id) (port id) Set primary to P1 before starting bonding port. @@ -139,7 +139,7 @@ Set primary to P1 before starting bonding port. testpmd> show bonding config 2 -Primary is now P1. There are 2 active slaves. +Primary is now P1. There are 2 active members. Use P2 only for forwarding. @@ -151,7 +151,7 @@ Use P2 only for forwarding. testpmd> start testpmd> show bonding config 2 -Primary is now P1. There are 2 active slaves. +Primary is now P1. There are 2 active members. .. code-block:: console @@ -163,10 +163,10 @@ VF traffic is seen at P1 and P2. testpmd> clear port stats all testpmd> set bonding primary 0 2 - testpmd> remove bonding slave 1 2 + testpmd> remove bonding member 1 2 testpmd> show bonding config 2 -Primary is now P0. There is 1 active slave. +Primary is now P0. There is 1 active member. .. code-block:: console @@ -210,7 +210,7 @@ On host_server_1: Terminal 1 testpmd> show bonding config 2 -Primary is now P0. There is 1 active slave. +Primary is now P0. There is 1 active member. .. code-block:: console @@ -346,7 +346,7 @@ The ``mac_addr`` command only works with the Kernel PF for Niantic. testpmd> show port stats all. 
testpmd> show config fwd testpmd> show bonding config 2 - testpmd> add bonding slave 1 2 + testpmd> add bonding member 1 2 testpmd> set bonding primary 1 2 testpmd> show bonding config 2 testpmd> show port stats all @@ -355,7 +355,7 @@ VF traffic is seen at P1 (VF) and P2 (Bonded device). .. code-block:: console - testpmd> remove bonding slave 0 2 + testpmd> remove bonding member 0 2 testpmd> show bonding config 2 testpmd> port stop 0 testpmd> port close 0 diff --git a/doc/guides/nics/ark.rst b/doc/guides/nics/ark.rst index e1683cf7828..bcc9f505df4 100644 --- a/doc/guides/nics/ark.rst +++ b/doc/guides/nics/ark.rst @@ -334,6 +334,7 @@ with Arkville releases 21.05, 21.08 and 21.11. LTS versions of DPDK remain compatible with the corresponding Arkville version. If other combinations are required, please contact Atomic Rules support. +* DPDK 23.11 requires Arkville 23.11. * DPDK 22.07 requires Arkville 22.07. * DPDK 22.03 requires Arkville 22.03. * DPDK 21.05 requires Arkville 21.05. diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst index 70242ab2cee..6db880d632f 100644 --- a/doc/guides/nics/bnxt.rst +++ b/doc/guides/nics/bnxt.rst @@ -781,8 +781,8 @@ DPDK implements a light-weight library to allow PMDs to be bonded together and p .. code-block:: console -   dpdk-testpmd -l 0-3 -n4 --vdev 'net_bonding0,mode=0,slave=,slave=,mac=XX:XX:XX:XX:XX:XX’ – --socket_num=1 – -i --port-topology=chained - (ex) dpdk-testpmd -l 1,3,5,7,9 -n4 --vdev 'net_bonding0,mode=0,slave=0000:82:00.0,slave=0000:82:00.1,mac=00:1e:67:1d:fd:1d' – --socket-num=1 – -i --port-topology=chained +   dpdk-testpmd -l 0-3 -n4 --vdev 'net_bonding0,mode=0,member=,member=,mac=XX:XX:XX:XX:XX:XX’ – --socket_num=1 – -i --port-topology=chained + (ex) dpdk-testpmd -l 1,3,5,7,9 -n4 --vdev 'net_bonding0,mode=0,member=0000:82:00.0,member=0000:82:00.1,mac=00:1e:67:1d:fd:1d' – --socket-num=1 – -i --port-topology=chained Vector Processing ----------------- diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst index 9229056f6fe..21063a80ff9 100644 --- a/doc/guides/nics/cnxk.rst +++ b/doc/guides/nics/cnxk.rst @@ -433,6 +433,26 @@ The OCTEON CN9K/CN10K SoC family NIC has inbuilt HW assisted external mempool ma as it is performance wise most effective way for packet allocation and Tx buffer recycling on OCTEON 9 SoC platform. +``mempool_cnxk`` rte_mempool cache sizes for CN10K +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The OCTEON CN10K SoC Family supports asynchronous batch allocation +of objects from an NPA pool. +In the CNXK mempool driver, asynchronous batch allocation is enabled +when local caches are enabled. +This asynchronous batch allocation will be using an additional local async buffer +whose size will be equal to ``RTE_ALIGN_CEIL(rte_mempool->cache_size, 16)``. +This can result in additional objects being cached locally. +While creating an rte_mempool using ``mempool_cnxk`` driver for OCTEON CN10K, +this must be taken into consideration +and the local cache sizes should be adjusted accordingly +so that starvation does not happen. + +For Eg: If the ``cache_size`` passed into ``rte_mempool_create`` is ``8``, +then the max objects than can get cached locally on a core +would be the sum of max objects in the local cache + max objects in the async buffer +i.e ``8 + RTE_ALIGN_CEIL(8, 16) = 24``. + CRC stripping ~~~~~~~~~~~~~ @@ -581,6 +601,18 @@ Runtime Config Options for inline device With the above configuration, driver would poll for soft expiry events every 1000 usec. 
+- ``NPC MCAM Aging poll frequency in seconds`` (default ``10``) + + Poll frequency for aging control thread can be specified by + ``aging_poll_freq`` ``devargs`` parameter. + + For example:: + + -a 0002:01:00.2,aging_poll_freq=50 + + With the above configuration, driver would poll for aging flows every 50 + seconds. + Debugging Options ----------------- diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst index 39a2b603f33..89a322c5f10 100644 --- a/doc/guides/nics/cpfl.rst +++ b/doc/guides/nics/cpfl.rst @@ -92,12 +92,68 @@ Runtime Configuration Then the PMD will configure Tx queue with single queue mode. Otherwise, split queue mode is chosen by default. +- ``representor`` (default ``not enabled``) + + The cpfl PMD supports the creation of APF/CPF/VF port representors. + Each port representor corresponds to a single function of that device. + Using the ``devargs`` option ``representor`` the user can specify + which functions to create port representors. + + Format is:: + + [[c]pf]vf + + Controller_id 0 is host (default), while 1 is accelerator core. + Pf_id 0 is APF (default), while 1 is CPF. + Default value can be omitted. + + Create 4 representors for 4 vfs on host APF:: + + -a BDF,representor=c0pf0vf[0-3] + + Or:: + + -a BDF,representor=pf0vf[0-3] + + Or:: + + -a BDF,representor=vf[0-3] + + Create a representor for CPF on accelerator core:: + + -a BDF,representor=c1pf1 + + Multiple representor devargs are supported. Create 4 representors for 4 + vfs on host APF and one representor for CPF on accelerator core:: + + -a BDF,representor=vf[0-3],representor=c1pf1 + +- ``flow_parser`` (default ``not enabled``) + + The cpfl PMD supports utilizing a JSON config file to translate rte_flow tokens into + low-level hardware resources. + + The JSON configuration file is provided by the hardware vendor and is intended to work + exclusively with a specific P4 pipeline configuration, which must be compiled and programmed + into the hardware. + + The format of the JSON file strictly follows the internal specifications of the hardware + vendor and is not meant to be modified directly by users. + + Using the ``devargs`` option ``flow_parser`` the user can specify the path + of a json file, for example:: + + -a ca:00.0,flow_parser="refpkg.json" + + Then the PMD will load json file for device ``ca:00.0``. + The parameter is optional. Driver compilation and testing ------------------------------ Refer to the document :doc:`build_and_test` for details. +The jansson library must be installed to use rte_flow. Features -------- @@ -128,3 +184,35 @@ Hairpin queue E2100 Series can loopback packets from RX port to TX port. This feature is called port-to-port or hairpin. Currently, the PMD only supports single port hairpin. + +Flow offload +~~~~~~~~~~~~ + +PMD uses a json file to direct CPF PMD to parse rte_flow tokens into +low level hardware resources. + +- Required Libraries + + * jansson + + * For Ubuntu, it can be installed using `apt install libjansson-dev` + +- run testpmd with the json file, create two vports + + .. code-block:: console + + dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0-1],flow_parser="refpkg.json" -- -i + +#. Create one flow to forward ETH-IPV4-TCP from I/O port to a local(CPF's) vport. Flow should be created on + vport X. Group M should match fxp module. Action port_representor Y means forward packet to local vport Y:: + + .. 
code-block:: console + + flow create X ingress group M pattern eth dst is 00:01:00:00:03:14 / ipv4 src is 192.168.0.1 \ + dst is 192.168.0.2 / tcp / end actions port_representor port_id Y / end + +#. Send a matched packet, and it should be displayed on PMD:: + + .. code-block:: console + + sendp(Ether(dst='00:01:00:00:03:14')/IP(src='192.168.0.1',dst='192.168.0.2')/TCP(),iface="ens25f0") diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini index 838e781d6d1..ac7de9a0f0a 100644 --- a/doc/guides/nics/features/cnxk.ini +++ b/doc/guides/nics/features/cnxk.ini @@ -67,6 +67,7 @@ ipv4 = Y ipv6 = Y ipv6_ext = Y ipv6_frag_ext = Y +ipv6_routing_ext = Y mark = Y mpls = Y nvgre = Y @@ -80,6 +81,7 @@ vxlan = Y vxlan_gpe = Y [rte_flow actions] +age = Y count = Y drop = Y flag = Y diff --git a/doc/guides/nics/features/cnxk_vf.ini b/doc/guides/nics/features/cnxk_vf.ini index 470c45ce591..b03e8b35c3f 100644 --- a/doc/guides/nics/features/cnxk_vf.ini +++ b/doc/guides/nics/features/cnxk_vf.ini @@ -58,6 +58,7 @@ ipv4 = Y ipv6 = Y ipv6_ext = Y ipv6_frag_ext = Y +ipv6_routing_ext = Y mark = Y mpls = Y nvgre = Y @@ -71,6 +72,7 @@ vxlan = Y vxlan_gpe = Y [rte_flow actions] +age = Y count = Y drop = Y flag = Y diff --git a/doc/guides/nics/features/cpfl.ini b/doc/guides/nics/features/cpfl.ini index f4e45c7c680..66d0caebbb6 100644 --- a/doc/guides/nics/features/cpfl.ini +++ b/doc/guides/nics/features/cpfl.ini @@ -14,3 +14,22 @@ L4 checksum offload = P Linux = Y x86-32 = Y x86-64 = Y + +[rte_flow items] +eth = Y +icmp = Y +ipv4 = Y +tcp = Y +udp = Y +vlan = Y +vxlan = Y + +[rte_flow actions] +count = Y +drop = Y +port_representor = Y +queue = Y +represented_port = Y +rss = Y +vxlan_decap = Y +vxlan_encap = Y diff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini index 2011e97127e..e41a97b3bb0 100644 --- a/doc/guides/nics/features/default.ini +++ b/doc/guides/nics/features/default.ini @@ -137,6 +137,7 @@ ppp = pppoed = pppoes = pppoe_proto_id = +ptype = quota = raw = represented_port = diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini index b72cd984842..db4f92ce718 100644 --- a/doc/guides/nics/features/iavf.ini +++ b/doc/guides/nics/features/iavf.ini @@ -11,6 +11,8 @@ Speed capabilities = Y Link status = Y Rx interrupt = Y Queue start/stop = Y +Runtime Rx queue setup = Y +Runtime Tx queue setup = Y Power mgmt address monitor = Y MTU update = Y Scattered Rx = Y diff --git a/doc/guides/nics/features/mana.ini b/doc/guides/nics/features/mana.ini index e69bf4af15a..42fd3327d2b 100644 --- a/doc/guides/nics/features/mana.ini +++ b/doc/guides/nics/features/mana.ini @@ -13,6 +13,7 @@ RSS hash = Y L3 checksum offload = Y L4 checksum offload = Y Basic stats = Y +MTU update = Y Multiprocess aware = Y Linux = Y x86-64 = Y diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini index c0e0b779cf3..fc67415c6c6 100644 --- a/doc/guides/nics/features/mlx5.ini +++ b/doc/guides/nics/features/mlx5.ini @@ -82,6 +82,7 @@ mark = Y meta = Y meter_color = Y mpls = Y +nsh = Y nvgre = Y port_id = Y port_representor = Y diff --git a/doc/guides/nics/features/nfp.ini b/doc/guides/nics/features/nfp.ini index 4264943f05c..b53af7b60a5 100644 --- a/doc/guides/nics/features/nfp.ini +++ b/doc/guides/nics/features/nfp.ini @@ -28,6 +28,7 @@ x86-64 = Y Usage doc = Y [rte_flow items] +conntrack = Y eth = Y geneve = Y gre = Y @@ -42,6 +43,7 @@ vlan = Y vxlan = Y [rte_flow actions] +conntrack = Y count = Y drop = Y jump = Y diff --git 
a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst index 1f5ac25c27e..791e9553bce 100644 --- a/doc/guides/nics/i40e.rst +++ b/doc/guides/nics/i40e.rst @@ -101,6 +101,8 @@ For X710/XL710/XXV710, +--------------+-----------------------+------------------+ | DPDK version | Kernel driver version | Firmware version | +==============+=======================+==================+ + | 23.07 | 2.22.20 | 9.20 | + +--------------+-----------------------+------------------+ | 23.03 | 2.22.18 | 9.20 | +--------------+-----------------------+------------------+ | 22.11 | 2.20.12 | 9.01 | @@ -162,6 +164,8 @@ For X722, +--------------+-----------------------+------------------+ | DPDK version | Kernel driver version | Firmware version | +==============+=======================+==================+ + | 23.07 | 2.22.20 | 6.20 | + +--------------+-----------------------+------------------+ | 23.03 | 2.22.18 | 6.20 | +--------------+-----------------------+------------------+ | 22.11 | 2.20.12 | 6.00 | diff --git a/doc/guides/nics/ice.rst b/doc/guides/nics/ice.rst index c351c6bd742..6e71aac3c20 100644 --- a/doc/guides/nics/ice.rst +++ b/doc/guides/nics/ice.rst @@ -70,7 +70,8 @@ The detailed information can refer to chapter Tested Platforms/Tested NICs in re +-----------+---------------+-----------------+-----------+--------------+-----------+ | 23.03 | 1.11.1 | 1.3.30 | 1.3.40 | 1.3.10 | 4.2 | +-----------+---------------+-----------------+-----------+--------------+-----------+ - + | 23.07 | 1.12.6 | 1.3.35 | 1.3.45 | 1.3.13 | 4.3 | + +-----------+---------------+-----------------+-----------+--------------+-----------+ Configuration ------------- @@ -90,25 +91,6 @@ Runtime Configuration NOTE: In Safe mode, only very limited features are available, features like RSS, checksum, fdir, tunneling ... are all disabled. -- ``Generic Flow Pipeline Mode Support`` (default ``0``) - - In pipeline mode, a flow can be set at one specific stage by setting parameter - ``priority``. Currently, we support two stages: priority = 0 or !0. Flows with - priority 0 located at the first pipeline stage which typically be used as a firewall - to drop the packet on a blocklist(we called it permission stage). At this stage, - flow rules are created for the device's exact match engine: switch. Flows with priority - !0 located at the second stage, typically packets are classified here and be steered to - specific queue or queue group (we called it distribution stage), At this stage, flow - rules are created for device's flow director engine. - For none-pipeline mode, ``priority`` is ignored, a flow rule can be created as a flow director - rule or a switch rule depends on its pattern/action and the resource allocation situation, - all flows are virtually at the same pipeline stage. - By default, generic flow API is enabled in none-pipeline mode, user can choose to - use pipeline mode by setting ``devargs`` parameter ``pipeline-mode-support``, - for example:: - - -a 80:00.0,pipeline-mode-support=1 - - ``Default MAC Disable`` (default ``0``) Disable the default MAC make the device drop all packets by default, @@ -320,6 +302,51 @@ The DCF PMD needs to advertise and acquire DCF capability which allows DCF to send AdminQ commands that it would like to execute over to the PF and receive responses for the same from PF. +Generic Flow Support +~~~~~~~~~~~~~~~~~~~~ + +The ice PMD provides support for the Generic Flow API (RTE_FLOW), enabling +users to offload various flow classification tasks to the E810 NIC. 
+The E810 NIC's packet processing pipeline consists of the following stages: + +Switch: Supports exact match and limited wildcard matching with a large flow +capacity. + +ACL: Supports wildcard matching with a smaller flow capacity (DCF mode only). + +FDIR: Supports exact match with a large flow capacity (PF mode only). + +Hash: Supports RSS (PF mode only) + +The ice PMD utilizes the ice_flow_engine structure to represent each of these +stages and leverages the rte_flow rule's ``group`` attribute for selecting the +appropriate engine for Switch, ACL, and FDIR operations: + +Group 0 maps to Switch +Group 1 maps to ACL +Group 2 maps to FDIR + +In the case of RSS, it will only be selected if a ``RTE_FLOW_ACTION_RSS`` action +is targeted to no queue group, and the group attribute is ignored. + +For each engine, a list of supported patterns is maintained in a global array +named ``ice__supported_pattern``. The Ice PMD will reject any rule with +a pattern that is not included in the supported list. + +One notable feature is the ice PMD's ability to leverage the Raw pattern, +enabling protocol-agnostic flow offloading. Here is an example of creating +a rule that matches an IPv4 destination address of 1.2.3.4 and redirects it to +queue 3 using a raw pattern:: + + flow create 0 ingress group 2 pattern raw \ + pattern spec \ + 00000000000000000000000008004500001400004000401000000000000001020304 \ + pattern mask \ + 000000000000000000000000000000000000000000000000000000000000ffffffff \ + end actions queue index 3 / mark id 3 / end + +Currently, raw pattern support is limited to the FDIR and Hash engines. + Additional Options ++++++++++++++++++ diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst index d365dbc1856..e06d62a873f 100644 --- a/doc/guides/nics/intel_vf.rst +++ b/doc/guides/nics/intel_vf.rst @@ -1,6 +1,8 @@ .. SPDX-License-Identifier: BSD-3-Clause Copyright(c) 2010-2014 Intel Corporation. +.. include:: + Intel Virtual Function Driver ============================= @@ -101,6 +103,10 @@ For more detail on SR-IOV, please refer to the following documents: Set ``devargs`` parameter ``watchdog_period`` to adjust the watchdog period in microseconds, or set it to 0 to disable the watchdog, for example, ``-a 18:01.0,watchdog_period=5000`` or ``-a 18:01.0,watchdog_period=0``. + Enable VF auto-reset by setting the devargs parameter like ``-a 18:01.0,auto_reset=1`` + when IAVF is backed by an Intel\ |reg| E810 device + or an Intel\ |reg| 700 Series Ethernet device. + The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst index eac67a7864e..7086f3d1d41 100644 --- a/doc/guides/nics/mlx5.rst +++ b/doc/guides/nics/mlx5.rst @@ -153,6 +153,7 @@ Features - RSS support in sample action. - E-Switch mirroring and jump. - E-Switch mirroring and modify. +- Send to kernel. - 21844 flow priorities for ingress or egress flow groups greater than 0 and for any transfer flow group. - Flow quota. @@ -712,6 +713,13 @@ Limitations - The NIC egress flow rules on representor port are not supported. +- Send to kernel action (``RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL``): + + - Supported on non-root table. + - Supported in isolated mode. + - In HW steering (``dv_flow_en`` = 2): + - not supported on guest port. 
+ - During live migration to a new process set its flow engine as standby mode, the user should only program flow rules in group 0 (``fdb_def_rule_en=0``). Live migration is only supported under SWS (``dv_flow_en=1``). diff --git a/doc/guides/nics/nfp.rst b/doc/guides/nics/nfp.rst index 456a22dcbce..fee1860f4af 100644 --- a/doc/guides/nics/nfp.rst +++ b/doc/guides/nics/nfp.rst @@ -348,6 +348,18 @@ Metadata with L2 (1W/4B) The vlan[0] is the innermost VLAN The vlan[1] is the QinQ info +NFP_NET_META_IPSEC +The IPsec type requires 4 bit. +The SA index value is 32 bit which need 1 data field. +:: + + ---------------------------------------------------------------- + 3 2 1 0 + 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | sa_idx | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + TX ~~ @@ -363,3 +375,22 @@ NFP_NET_META_VLAN ^ ^ NOTE: | TCI | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +NFP_NET_META_IPSEC +The IPsec type requires 12 bit, because it requires three data fields. +:: + + ---------------------------------------------------------------- + 3 2 1 0 + 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | sa_idx | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | nfp_ipsec_force_seq_low | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | nfp_ipsec_force_seq_hi | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + The sa_idx is 32 bit which need 1 data field. + The nfp_ipsec_force_seq_low & nfp_ipsec_force_seq_hi is Anti-re-anti-count, + which is 64 bit need two data fields. diff --git a/doc/guides/nics/vmxnet3.rst b/doc/guides/nics/vmxnet3.rst index db242cd6427..3f498b905de 100644 --- a/doc/guides/nics/vmxnet3.rst +++ b/doc/guides/nics/vmxnet3.rst @@ -100,6 +100,8 @@ There are several options available for filtering packets at VMXNET3 device leve * Multicast with Multicast Filter table is not supported. + * IOVA as VA on AMD hosts is supported from ESXi 7.0 U1 onwards. + Prerequisites ------------- diff --git a/doc/guides/platform/bluefield.rst b/doc/guides/platform/bluefield.rst index 98df5152418..322b08a217d 100644 --- a/doc/guides/platform/bluefield.rst +++ b/doc/guides/platform/bluefield.rst @@ -19,6 +19,7 @@ Supported BlueField Platforms ----------------------------- - `BlueField-2 `_ +- `BlueField-3 `_ Common Offload HW Drivers diff --git a/doc/guides/prog_guide/bbdev.rst b/doc/guides/prog_guide/bbdev.rst index 549f1d002a4..c43e478edac 100644 --- a/doc/guides/prog_guide/bbdev.rst +++ b/doc/guides/prog_guide/bbdev.rst @@ -903,6 +903,12 @@ given below. |RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK | | Set if a device supports loopback access to HARQ internal memory | +--------------------------------------------------------------------+ +|RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS | +| Set if a device includes LLR filler bits in HARQ circular buffer | ++--------------------------------------------------------------------+ +|RTE_BBDEV_LDPC_HARQ_4BIT_COMPRESSION | +|Set if a device supports input/output 4 bits HARQ compression | ++--------------------------------------------------------------------+ The structure passed for each LDPC decode operation is given below, with the operation flags forming a bitmask in the ``op_flags`` field. @@ -1111,6 +1117,18 @@ with the operation flags forming a bitmask in the ``op_flags`` field. 
|RTE_BBDEV_FFT_FP16_OUTPUT | | Set if the output data shall use FP16 format instead of INT16 | +--------------------------------------------------------------------+ +|RTE_BBDEV_FFT_TIMING_OFFSET_PER_CS | +| Set if device supports adjusting time offset per CS | ++--------------------------------------------------------------------+ +|RTE_BBDEV_FFT_TIMING_ERROR | +| Set if device supports correcting for timing error | ++--------------------------------------------------------------------+ +|RTE_BBDEV_FFT_DEWINDOWING | +| Set if enabling the option FFT Dewindowing in Frequency domain | ++--------------------------------------------------------------------+ +|RTE_BBDEV_FFT_FREQ_RESAMPLING | +| Set if device supports the optional frequency resampling | ++--------------------------------------------------------------------+ The FFT parameters are set out in the table below. @@ -1121,6 +1139,8 @@ The FFT parameters are set out in the table below. +-------------------------+--------------------------------------------------------------+ |base_output |output data | +-------------------------+--------------------------------------------------------------+ +|dewindowing_input |optional frequency domain dewindowing input data | ++-------------------------+--------------------------------------------------------------+ |power_meas_output |optional output data with power measurement on DFT output | +-------------------------+--------------------------------------------------------------+ |op_flags |bitmask of all active operation capabilities | @@ -1155,6 +1175,16 @@ The FFT parameters are set out in the table below. +-------------------------+--------------------------------------------------------------+ |fp16_exp_adjust |value added to FP16 exponent at conversion from INT16 | +-------------------------+--------------------------------------------------------------+ +|freq_resample_mode |frequency ressampling mode (0:transparent, 1-2: resample) | ++-------------------------+--------------------------------------------------------------+ +| output_depadded_size |output depadded size prior to frequency resampling | ++-------------------------+--------------------------------------------------------------+ +|cs_theta_0 |timing error correction initial phase | ++-------------------------+--------------------------------------------------------------+ +|cs_theta_d |timing error correction phase increment | ++-------------------------+--------------------------------------------------------------+ +|time_offset |time offset per CS of time domain samples | ++-------------------------+--------------------------------------------------------------+ The mbuf input ``base_input`` is mandatory for all bbdev PMDs and is the incoming data for the processing. Its size may not fit into an actual mbuf, @@ -1165,6 +1195,59 @@ either as 2 INT16 or as 2 FP16 based when the option supported. The data layout is based on contiguous concatenation of output data first by cyclic shift then by antenna. +BBDEV MLD-TS Operation +~~~~~~~~~~~~~~~~~~~~~~ + +This operation allows to run the Tree Search (TS) portion of a Maximum Likelihood processing (MLD). + +This alternate equalization option accelerates the exploration of the best combination of +transmitted symbols across layers minimizing the Euclidean distance between the received and +reconstructed signal, then generates the LLRs to be used by the LDPC Decoder. +The input is the results of the Q R decomposition: Q^Hy signal and R matrix. 
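For clarity, this is the standard maximum-likelihood detection problem after QR decomposition of
the channel matrix: with :math:`H = QR`, minimizing the Euclidean distance between the received
and reconstructed signal amounts to

.. math::

   \hat{x} = \arg\min_{x} \left\| Q^{H} y - R x \right\|^{2}

which the tree search explores layer by layer over the candidate transmitted symbols, before
generating the LLRs.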
+ +The structure passed for each MLD-TS operation is given below, +with the operation flags forming a bitmask in the ``op_flags`` field. + + **NOTE:** The actual operation flags that may be used with a specific + bbdev PMD are dependent on the driver capabilities as reported via + ``rte_bbdev_info_get()``, and may be a subset of those below. + +.. literalinclude:: ../../../lib/bbdev/rte_bbdev_op.h + :language: c + :start-after: Structure rte_bbdev_op_mldts 8< + :end-before: >8 End of structure rte_bbdev_op_mldts. + ++--------------------------------------------------------------------+ +|Description of MLD-TS capability flags | ++====================================================================+ +|RTE_BBDEV_MLDTS_REP | +| Set if the option to use repeated data from R channel is supported | ++--------------------------------------------------------------------+ + +The MLD-TS parameters are set out in the table below. + ++-------------------------+--------------------------------------------------------------+ +|Parameter |Description | ++=========================+==============================================================+ +|qhy_input |input data qHy | ++-------------------------+--------------------------------------------------------------+ +|r_input |input data R triangular matrix | ++-------------------------+--------------------------------------------------------------+ +|output |output data (LLRs) | ++-------------------------+--------------------------------------------------------------+ +|op_flags |bitmask of all active operation capabilities | ++-------------------------+--------------------------------------------------------------+ +|num_rbs |number of Resource Blocks | ++-------------------------+--------------------------------------------------------------+ +|num_layers |number of overlapping layers | ++-------------------------+--------------------------------------------------------------+ +|q_m |array of modulation order for each layer | ++-------------------------+--------------------------------------------------------------+ +|r_rep |optional row repetition for the R matrix (subcarriers) | ++-------------------------+--------------------------------------------------------------+ +|c_rep |optional column repetition for the R matrix (symbols) | ++-------------------------+--------------------------------------------------------------+ + Sample code ----------- diff --git a/doc/guides/prog_guide/dispatcher_lib.rst b/doc/guides/prog_guide/dispatcher_lib.rst new file mode 100644 index 00000000000..f9998f45236 --- /dev/null +++ b/doc/guides/prog_guide/dispatcher_lib.rst @@ -0,0 +1,433 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2023 Ericsson AB. + +Dispatcher Library +================== + +Overview +-------- + +The purpose of the dispatcher is to help reduce coupling in an +:doc:`Eventdev `-based DPDK application. + +In particular, the dispatcher addresses a scenario where an +application's modules share the same event device and event device +ports, and performs work on the same lcore threads. + +The dispatcher replaces the conditional logic that follows an event +device dequeue operation, where events are dispatched to different +parts of the application, typically based on fields in the +``rte_event``, such as the ``queue_id``, ``sub_event_type``, or +``sched_type``. + +Below is an excerpt from a fictitious application consisting of two +modules; A and B. 
In this example, event-to-module routing is based +purely on queue id, where module A expects all events to a certain +queue id, and module B two other queue ids. + +.. note:: + + Event routing may reasonably be done based on other ``rte_event`` + fields (or even event user data). Indeed, that's the very reason to + have match callback functions, instead of a simple queue + id-to-handler mapping scheme. Queue id-based routing serves well in + a simple example. + +.. code-block:: c + + for (;;) { + struct rte_event events[MAX_BURST]; + unsigned int n; + + n = rte_event_dequeue_burst(dev_id, port_id, events, + MAX_BURST, 0); + + for (i = 0; i < n; i++) { + const struct rte_event *event = &events[i]; + + switch (event->queue_id) { + case MODULE_A_QUEUE_ID: + module_a_process(event); + break; + case MODULE_B_STAGE_0_QUEUE_ID: + module_b_process_stage_0(event); + break; + case MODULE_B_STAGE_1_QUEUE_ID: + module_b_process_stage_1(event); + break; + } + } + } + +The issue this example attempts to illustrate is that the centralized +conditional logic has knowledge of things that should be private to +the modules. In other words, this pattern leads to a violation of +module encapsulation. + +The shared conditional logic contains explicit knowledge about what +events should go where. In case, for example, the +``module_a_process()`` is broken into two processing stages — a +module-internal affair — the shared conditional code must be updated +to reflect this change. + +The centralized event routing code becomes an issue in larger +applications, where modules are developed by different organizations. +This pattern also makes module reuse across different applications more +difficult. The part of the conditional logic relevant for a particular +application may need to be duplicated across many module +instantiations (e.g., applications and test setups). + +The dispatcher separates the mechanism (routing events to their +receiver) from the policy (which events should go where). + +The basic operation of the dispatcher is as follows: + +* Dequeue a batch of events from the event device. +* For each event determine which handler should receive the event, using + a set of application-provided, per-handler event matching callback + functions. +* Provide events matching a particular handler, to that handler, using + its process callback. + +If the above application would have made use of the dispatcher, the +code relevant for its module A may have looked something like this: + +.. code-block:: c + + static bool + module_a_match(const struct rte_event *event, void *cb_data) + { + return event->queue_id == MODULE_A_QUEUE_ID; + } + + static void + module_a_process_events(uint8_t event_dev_id, uint8_t event_port_id, + const struct rte_event *events, + uint16_t num, void *cb_data) + { + uint16_t i; + + for (i = 0; i < num; i++) + module_a_process_event(&events[i]); + } + + /* In the module's initialization code */ + rte_dispatcher_register(dispatcher, module_a_match, NULL, + module_a_process_events, module_a_data); + +.. note:: + + Error handling is left out of this and future example code in this chapter. + +When the shared conditional logic is removed, a new question arises: +which part of the system actually runs the dispatching mechanism? Or +phrased differently, what is replacing the function hosting the shared +conditional logic (typically launched on all lcores using +``rte_eal_remote_launch()``)? To solve this issue, the dispatcher is +run as a DPDK :doc:`Service `. 
+ +The dispatcher is a layer between the application and the event device +in the receive direction. In the transmit (i.e., item of work +submission) direction, the application directly accesses the Eventdev +core API (e.g., ``rte_event_enqueue_burst()``) to submit new or +forwarded events to the event device. + +Dispatcher Creation +------------------- + +A dispatcher is created using the ``rte_dispatcher_create()`` function. + +The event device must be configured before the dispatcher is created. + +Usually, only one dispatcher is needed per event device. A dispatcher +handles exactly one event device. + +A dispatcher is freed using the ``rte_dispatcher_free()`` function. +The dispatcher's service functions must not be running on +any lcore at the point of this call. + +Event Port Binding +------------------ + +To be able to dequeue events, the dispatcher must know which event +ports are to be used, on all the lcores it uses. The application +provides this information using +``rte_dispatcher_bind_port_to_lcore()``. + +This call is typically made from the part of the application that +deals with deployment issues (e.g., iterating lcores and determining +which lcore does what), at the time of application initialization. + +The ``rte_dispatcher_unbind_port_from_lcore()`` is used to undo +this operation. + +Multiple lcore threads may not safely use the same event +port. + +.. note:: + + This property (which is a feature, not a bug) is inherited from the + core Eventdev APIs. + +Event ports cannot safely be bound or unbound while the dispatcher's +service function is running on any lcore. + +Event Handlers +-------------- + +The dispatcher handler is an interface between the dispatcher and an +application module, used to route events to the appropriate part of +the application. + +Handler Registration +^^^^^^^^^^^^^^^^^^^^ + +The event handler interface consists of two function pointers: + +* The ``rte_dispatcher_match_t`` callback, which job is to + decide if this event is to be the property of this handler. +* The ``rte_dispatcher_process_t``, which is used by the + dispatcher to deliver matched events. + +An event handler registration is valid on all lcores. + +The functions pointed to by the match and process callbacks resides in +the application's domain logic, with one or more handlers per +application module. + +A module may use more than one event handler, for convenience or to +further decouple sub-modules. However, the dispatcher may impose an +upper limit of the number of handlers. In addition, installing a large +number of handlers increase dispatcher overhead, although this does +not necessarily translate to a system-level performance degradation. See +the section on :ref:`Event Clustering` for more information. + +Handler registration and unregistration cannot safely be done while +the dispatcher's service function is running on any lcore. + +Event Matching +^^^^^^^^^^^^^^ + +A handler's match callback function decides if an event should be +delivered to this handler, or not. + +An event is routed to no more than one handler. Thus, if a match +function returns true, no further match functions will be invoked for +that event. + +Match functions must not depend on being invocated in any particular +order (e.g., in the handler registration order). + +Events failing to match any handler are dropped, and the +``ev_drop_count`` counter is updated accordingly. 
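To illustrate the earlier note that matching need not be based on the queue id, the following
minimal sketch matches on ``sub_event_type`` instead; the ``module_c_match_data`` struct and its
contents are hypothetical application-defined callback data:

.. code-block:: c

    struct module_c_match_data {
        uint8_t sub_event_type;
    };

    static bool
    module_c_match(const struct rte_event *event, void *cb_data)
    {
        const struct module_c_match_data *data = cb_data;

        /* Route on the event's sub event type rather than its queue id. */
        return event->sub_event_type == data->sub_event_type;
    }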
+ +Event Delivery +^^^^^^^^^^^^^^ + +The handler callbacks are invocated by the dispatcher's service +function, upon the arrival of events to the event ports bound to the +running service lcore. + +A particular event is delivered to at most one handler. + +The application must not depend on all match callback invocations for +a particular event batch being made prior to any process calls are +being made. For example, if the dispatcher dequeues two events from +the event device, it may choose to find out the destination for the +first event, and deliver it, and then continue to find out the +destination for the second, and then deliver that event as well. The +dispatcher may also choose a strategy where no event is delivered +until the destination handler for both events have been determined. + +The events provided in a single process call always belong to the same +event port dequeue burst. + +.. _Event Clustering: + +Event Clustering +^^^^^^^^^^^^^^^^ + +The dispatcher maintains the order of events destined for the same +handler. + +*Order* here refers to the order in which the events were delivered +from the event device to the dispatcher (i.e., in the event array +populated by ``rte_event_dequeue_burst()``), in relation to the order +in which the dispatcher delivers these events to the application. + +The dispatcher *does not* guarantee to maintain the order of events +delivered to *different* handlers. + +For example, assume that ``MODULE_A_QUEUE_ID`` expands to the value 0, +and ``MODULE_B_STAGE_0_QUEUE_ID`` expands to the value 1. Then +consider a scenario where the following events are dequeued from the +event device (qid is short for event queue id). + +.. code-block:: none + + [e0: qid=1], [e1: qid=1], [e2: qid=0], [e3: qid=1] + +The dispatcher may deliver the events in the following manner: + +.. code-block:: none + + module_b_stage_0_process([e0: qid=1], [e1: qid=1]) + module_a_process([e2: qid=0]) + module_b_stage_0_process([e2: qid=1]) + +The dispatcher may also choose to cluster (group) all events destined +for ``module_b_stage_0_process()`` into one array: + +.. code-block:: none + + module_b_stage_0_process([e0: qid=1], [e1: qid=1], [e3: qid=1]) + module_a_process([e2: qid=0]) + +Here, the event ``e2`` is reordered and placed behind ``e3``, from a +delivery order point of view. This kind of reshuffling is allowed, +since the events are destined for different handlers. + +The dispatcher may also deliver ``e2`` before the three events +destined for module B. + +An example of what the dispatcher may not do, is to reorder event +``e1`` so, that it precedes ``e0`` in the array passed to the module +B's stage 0 process callback. + +Although clustering requires some extra work for the dispatcher, it +leads to fewer process function calls. In addition, and likely more +importantly, it improves temporal locality of memory accesses to +handler-specific data structures in the application, which in turn may +lead to fewer cache misses and improved overall performance. + +Finalize +-------- + +The dispatcher may be configured to notify one or more parts of the +application when the matching and processing of a batch of events has +completed. + +The ``rte_dispatcher_finalize_register`` call is used to +register a finalize callback. The function +``rte_dispatcher_finalize_unregister`` is used to remove a +callback. 
+ +The finalize hook may be used by a set of event handlers (in the same +modules, or a set of cooperating modules) sharing an event output +buffer, since it allows for flushing of the buffers at the last +possible moment. In particular, it allows for buffering of +``RTE_EVENT_OP_FORWARD`` events, which must be flushed before the next +``rte_event_dequeue_burst()`` call is made (assuming implicit release +is employed). + +The following is an example with an application-defined event output +buffer (the ``event_buffer``): + +.. code-block:: c + + static void + finalize_batch(uint8_t event_dev_id, uint8_t event_port_id, + void *cb_data) + { + struct event_buffer *buffer = cb_data; + unsigned lcore_id = rte_lcore_id(); + struct event_buffer_lcore *lcore_buffer = + &buffer->lcore_buffer[lcore_id]; + + event_buffer_lcore_flush(lcore_buffer); + } + + /* In the module's initialization code */ + rte_dispatcher_finalize_register(dispatcher, finalize_batch, + shared_event_buffer); + +The dispatcher does not track any relationship between a handler and a +finalize callback, and all finalize callbacks will be called, if (and +only if) at least one event was dequeued from the event device. + +Finalize callback registration and unregistration cannot safely be +done while the dispatcher's service function is running on any lcore. + +Service +------- + +The dispatcher is a DPDK service, and is managed in a manner similar +to other DPDK services (e.g., an Event Timer Adapter). + +Below is an example of how to configure a particular lcore to serve as +a service lcore, and to map an already-configured dispatcher +(identified by ``DISPATCHER_ID``) to that lcore. + +.. code-block:: c + + static void + launch_dispatcher_core(struct rte_dispatcher *dispatcher, + unsigned lcore_id) + { + uint32_t service_id; + + rte_service_lcore_add(lcore_id); + + rte_dispatcher_service_id_get(dispatcher, &service_id); + + rte_service_map_lcore_set(service_id, lcore_id, 1); + + rte_service_lcore_start(lcore_id); + + rte_service_runstate_set(service_id, 1); + } + +As the final step, the dispatcher must be started. + +.. code-block:: c + + rte_dispatcher_start(dispatcher); + + +Multi Service Dispatcher Lcores +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In an Eventdev application, most (or all) compute-intensive and +performance-sensitive processing is done in an event-driven manner, +where CPU cycles spent on application domain logic is the direct +result of items of work (i.e., ``rte_event`` events) dequeued from an +event device. + +In the light of this, it makes sense to have the dispatcher service be +the only DPDK service on all lcores used for packet processing — at +least in principle. + +However, there is nothing in DPDK that prevents colocating other +services with the dispatcher service on the same lcore. + +Tasks that prior to the introduction of the dispatcher into the +application was performed on the lcore, even though no events were +received, are prime targets for being converted into such auxiliary +services, running on the dispatcher core set. + +An example of such a task would be the management of a per-lcore timer +wheel (i.e., calling ``rte_timer_manage()``). + +Applications employing :doc:`Read-Copy-Update (RCU) ` (or +similar technique) may opt for having quiescent state (e.g., calling +``rte_rcu_qsbr_quiescent()``) signaling factored out into a separate +service, to assure resource reclaiming occurs even though some +lcores currently do not process any events. 
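As an illustration, per-lcore timer wheel management could be wrapped into such an auxiliary
service. The sketch below is an outline only, using the generic service-component API; error
handling is omitted and the service name is arbitrary:

.. code-block:: c

    #include <rte_common.h>
    #include <rte_service.h>
    #include <rte_service_component.h>
    #include <rte_timer.h>

    static int32_t
    timer_service_run(void *args __rte_unused)
    {
        /* Expire timers and run their callbacks on this lcore. */
        rte_timer_manage();

        return 0;
    }

    static uint32_t
    register_timer_service(void)
    {
        struct rte_service_spec spec = {
            .name = "app_timer_service",
            .callback = timer_service_run,
        };
        uint32_t service_id;

        rte_service_component_register(&spec, &service_id);
        rte_service_component_runstate_set(service_id, 1);
        rte_service_runstate_set(service_id, 1);

        /* Map service_id to the dispatcher service lcores with
         * rte_service_map_lcore_set(), as shown in the Service section.
         */
        return service_id;
    }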
+ +If more services than the dispatcher service is mapped to a service +lcore, it's important that the other service are well-behaved and +don't interfere with event processing to the extent the system's +throughput and/or latency requirements are at risk of not being met. + +In particular, to avoid jitter, they should have a small upper bound +for the maximum amount of time spent in a single service function +call. + +An example of scenario with a more CPU-heavy colocated service is a +low-lcore count deployment, where the event device lacks the +``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability (and thus +requires software to feed incoming packets into the event device). In +this case, the best performance may be achieved if the Event Ethernet +RX and/or TX Adapters are mapped to lcores also used for event +dispatching, since otherwise the adapter lcores would have a lot of +idle CPU cycles. diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst index 89014789de9..6debf54efb9 100644 --- a/doc/guides/prog_guide/env_abstraction_layer.rst +++ b/doc/guides/prog_guide/env_abstraction_layer.rst @@ -756,7 +756,7 @@ Control Thread API ~~~~~~~~~~~~~~~~~~ It is possible to create Control Threads using the public API -``rte_ctrl_thread_create()``. +``rte_thread_create_control()``. Those threads can be used for management/infrastructure tasks and are used internally by DPDK for multi process support and interrupt handling. diff --git a/doc/guides/prog_guide/event_dma_adapter.rst b/doc/guides/prog_guide/event_dma_adapter.rst new file mode 100644 index 00000000000..3443b6a8032 --- /dev/null +++ b/doc/guides/prog_guide/event_dma_adapter.rst @@ -0,0 +1,289 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright (c) 2023 Marvell. + +Event DMA Adapter Library +========================= + +DPDK :doc:`eventdev library ` provides event driven programming model +with features to schedule events. +:doc:`DMA device library ` provides an interface to DMA poll mode drivers +that support DMA operations. +Event DMA adapter is intended to bridge between the event device and the DMA device. + +Packet flow from DMA device to the event device can be accomplished +using software and hardware based transfer mechanisms. +The adapter queries an eventdev PMD to determine which mechanism to be used. +The adapter uses an EAL service core function for software-based packet transfer +and uses the eventdev PMD functions to configure hardware-based packet transfer +between DMA device and the event device. +DMA adapter uses a new event type called ``RTE_EVENT_TYPE_DMADEV`` +to indicate the source of event. + +Application can choose to submit a DMA operation directly to a DMA device +or send it to a DMA adapter via eventdev +based on ``RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD`` capability. +The first mode is known as the event new (``RTE_EVENT_DMA_ADAPTER_OP_NEW``) mode +and the second as the event forward (``RTE_EVENT_DMA_ADAPTER_OP_FORWARD``) mode. +Choice of mode can be specified while creating the adapter. +In the former mode, it is the application's responsibility to enable ingress packet ordering. +In the latter mode, it is the adapter's responsibility to enable ingress packet ordering. + + +Adapter Modes +------------- + +RTE_EVENT_DMA_ADAPTER_OP_NEW mode +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In the ``RTE_EVENT_DMA_ADAPTER_OP_NEW`` mode, +application submits DMA operations directly to an DMA device. 
+The adapter then dequeues DMA completions from the DMA device +and enqueues them as events to the event device. +This mode does not ensure ingress ordering +as the application directly enqueues to the dmadev without going through DMA/atomic stage. +In this mode, events dequeued from the adapter are treated as new events. +The application has to specify event information (response information) +which is needed to enqueue an event after the DMA operation is completed. + +.. _figure_event_dma_adapter_op_new: + +.. figure:: img/event_dma_adapter_op_new.* + + Working model of ``RTE_EVENT_DMA_ADAPTER_OP_NEW`` mode + + +RTE_EVENT_DMA_ADAPTER_OP_FORWARD mode +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In the ``RTE_EVENT_DMA_ADAPTER_OP_FORWARD`` mode, +if the event PMD and DMA PMD supports internal event port +(``RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD``), +the application should use ``rte_event_dma_adapter_enqueue()`` API +to enqueue DMA operations as events to DMA adapter. +If not, application retrieves DMA adapter's event port +using ``rte_event_dma_adapter_event_port_get()`` API, +links its event queue to this port +and starts enqueuing DMA operations as events to eventdev +using ``rte_event_enqueue_burst()``. +The adapter then dequeues the events +and submits the DMA operations to the dmadev. +After the DMA operation is complete, +the adapter enqueues events to the event device. + +Applications can use this mode when ingress packet ordering is needed. +In this mode, events dequeued from the adapter will be treated as forwarded events. +Application has to specify event information (response information) +needed to enqueue the event after the DMA operation has completed. + +.. _figure_event_dma_adapter_op_forward: + +.. figure:: img/event_dma_adapter_op_forward.* + + Working model of ``RTE_EVENT_DMA_ADAPTER_OP_FORWARD`` mode + + +API Overview +------------ + +This section has a brief introduction to the event DMA adapter APIs. +The application is expected to create an adapter +which is associated with a single eventdev, +then add dmadev and vchan to the adapter instance. + + +Create an adapter instance +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +An adapter instance is created using ``rte_event_dma_adapter_create()``. +This function is called with event device +to be associated with the adapter and port configuration +for the adapter to setup an event port (if the adapter needs to use a service function). + +Adapter can be started in ``RTE_EVENT_DMA_ADAPTER_OP_NEW`` +or ``RTE_EVENT_DMA_ADAPTER_OP_FORWARD`` mode. + +.. code-block:: c + + enum rte_event_dma_adapter_mode mode; + struct rte_event_dev_info dev_info; + struct rte_event_port_conf conf; + uint8_t evdev_id; + uint8_t dma_id; + int ret; + + ret = rte_event_dev_info_get(dma_id, &dev_info); + + conf.new_event_threshold = dev_info.max_num_events; + conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; + conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; + mode = RTE_EVENT_DMA_ADAPTER_OP_FORWARD; + ret = rte_event_dma_adapter_create(dma_id, evdev_id, &conf, mode); + + +``rte_event_dma_adapter_create_ext()`` function can be used by the application +to have a finer control on eventdev port allocation and setup. +The ``rte_event_dma_adapter_create_ext()`` function is passed a callback function. +The callback function is invoked if the adapter creates a service function +and uses an event port for it. +The callback is expected to fill the ``struct rte_event_dma_adapter_conf`` passed to it. 
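A minimal sketch of such a callback is shown below. It assumes the callback prototype and the
``event_port_id``/``max_nb`` members of ``struct rte_event_dma_adapter_conf`` mirror the other
adapter libraries (please verify against ``rte_event_dma_adapter.h``);
``APP_DMA_ADAPTER_PORT_ID`` is a hypothetical, application-reserved event port:

.. code-block:: c

    static int
    dma_adapter_conf_cb(uint8_t id, uint8_t evdev_id,
                        struct rte_event_dma_adapter_conf *conf, void *arg)
    {
        /* Event port to be used by the adapter's service function. */
        conf->event_port_id = APP_DMA_ADAPTER_PORT_ID;
        /* Upper limit on events the adapter may buffer. */
        conf->max_nb = 128;

        return 0;
    }

Such a callback would then be passed to ``rte_event_dma_adapter_create_ext()`` in place of the
static port configuration used with ``rte_event_dma_adapter_create()``.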
+ +In the ``RTE_EVENT_DMA_ADAPTER_OP_FORWARD`` mode, +if the event PMD and DMA PMD supports internal event port +(``RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD``), +events with DMA operations should be enqueued to the DMA adapter +using ``rte_event_dma_adapter_enqueue()`` API. +If not, the event port created by the adapter can be retrieved +using ``rte_event_dma_adapter_event_port_get()`` API. +An application can use this event port to link with an event queue, +on which it enqueues events towards the DMA adapter using ``rte_event_enqueue_burst()``. + +.. code-block:: c + + uint8_t dma_adpt_id, evdev_id, dma_dev_id, dma_ev_port_id, app_qid; + struct rte_event ev; + uint32_t cap; + int ret; + + /* Fill in event info and update event_ptr with rte_dma_op */ + memset(&ev, 0, sizeof(ev)); + . + . + ev.event_ptr = op; + + ret = rte_event_dma_adapter_caps_get(evdev_id, dma_dev_id, &cap); + if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) { + ret = rte_event_dma_adapter_enqueue(evdev_id, app_ev_port_id, ev, nb_events); + } else { + ret = rte_event_dma_adapter_event_port_get(dma_adpt_id, &dma_ev_port_id); + ret = rte_event_queue_setup(evdev_id, app_qid, NULL); + ret = rte_event_port_link(evdev_id, dma_ev_port_id, &app_qid, NULL, 1); + ev.queue_id = app_qid; + ret = rte_event_enqueue_burst(evdev_id, app_ev_port_id, ev, nb_events); + } + + +Event device configuration for service based adapter +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When ``rte_event_dma_adapter_create()`` is used for creating adapter instance, +``rte_event_dev_config::nb_event_ports`` is automatically incremented, +and event device is reconfigured with additional event port during service initialization. +This event device reconfigure logic also +increments the ``rte_event_dev_config::nb_single_link_event_port_queues`` parameter +if the adapter event port config is of type ``RTE_EVENT_PORT_CFG_SINGLE_LINK``. + +Applications using this mode of adapter creation need not configure the event device +with ``rte_event_dev_config::nb_event_ports`` and +``rte_event_dev_config::nb_single_link_event_port_queues`` parameters +required for DMA adapter when the adapter is created using the above-mentioned API. + + +Querying adapter capabilities +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ``rte_event_dma_adapter_caps_get()`` function allows the application +to query the adapter capabilities for an eventdev and dmadev combination. +This API provides whether dmadev and eventdev are connected using internal HW port or not. + +.. code-block:: c + + rte_event_dma_adapter_caps_get(dev_id, dma_dev_id, &cap); + + +Adding vchan to the adapter instance +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +dmadev device ID and vchan are configured using dmadev APIs. +For more information, see :doc:`dmadev `. + +.. code-block:: c + + struct rte_dma_vchan_conf vchan_conf; + struct rte_dma_conf dev_conf; + uint8_t dev_id = 0; + uint16_t vchan = 0; + + rte_dma_configure(dev_id, &dev_conf); + rte_dma_vchan_setup(dev_id, vchan, &vchan_conf); + +These dmadev ID and vchan are added to the instance +using the ``rte_event_dma_adapter_vchan_add()`` API. +The same is removed using ``rte_event_dma_adapter_vchan_del()`` API. +If hardware supports ``RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND`` capability, +event information must be passed to the add API. + +.. 
code-block:: c + + uint32_t cap; + int ret; + + ret = rte_event_dma_adapter_caps_get(evdev_id, dma_dev_id, &cap); + if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) { + struct rte_event event; + + rte_event_dma_adapter_vchan_add(id, dma_dev_id, vchan, &conf); + } else + rte_event_dma_adapter_vchan_add(id, dma_dev_id, vchan, NULL); + + +Configuring service function +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If the adapter uses a service function, the application is required +to assign a service core to the service function as show below. + +.. code-block:: c + + uint32_t service_id; + + if (rte_event_dma_adapter_service_id_get(dma_id, &service_id) == 0) + rte_service_map_lcore_set(service_id, CORE_ID); + + +Set event response information +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In the ``RTE_EVENT_DMA_ADAPTER_OP_FORWARD`` / ``RTE_EVENT_DMA_ADAPTER_OP_NEW`` mode, +the application specifies the dmadev ID and vchan ID in ``struct rte_event_dma_adapter_op`` +and the event information (response information) +needed to enqueue an event after the DMA operation has completed. +The response information is specified in ``struct rte_event`` +and appended to the ``struct rte_event_dma_adapter_op``. + + +Start the adapter instance +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The application calls ``rte_event_dma_adapter_start()`` to start the adapter. +This function calls the start callbacks of the eventdev PMDs +for hardware-based eventdev-dmadev connections +and ``rte_service_run_state_set()`` to enable the service function if one exists. + +.. code-block:: c + + rte_event_dma_adapter_start(id); + +.. note:: + + The eventdev to which the event_dma_adapter is connected should be started + before calling ``rte_event_dma_adapter_start()``. + + +Get adapter statistics +~~~~~~~~~~~~~~~~~~~~~~ + +The ``rte_event_dma_adapter_stats_get()`` function reports counters +defined in ``struct rte_event_dma_adapter_stats``. +The received packet and enqueued event counts are a sum of the counts +from the eventdev PMD callbacks if the callback is supported, +and the counts maintained by the service function, if one exists. + + +Set/Get adapter runtime configuration parameters +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The runtime configuration parameters of adapter can be set/get using +``rte_event_dma_adapter_runtime_params_set()`` and +``rte_event_dma_adapter_runtime_params_get()`` respectively. +The parameters that can be set/get are defined in +``struct rte_event_dma_adapter_runtime_params``. diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index 7c5e73b9fde..2e68cca798b 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -71,6 +71,9 @@ set to true. The function is passed the event device to be associated with the adapter and port configuration for the adapter to setup an event port if the adapter needs to use a service function. +If the application desires to control both the event port allocation and event +buffer size, ``rte_event_eth_rx_adapter_create_ext_with_params()`` can be used. 
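A hedged sketch of that variant follows; the exact argument order of
``rte_event_eth_rx_adapter_create_ext_with_params()`` should be verified against
``rte_event_eth_rx_adapter.h``, and ``app_conf_cb`` is a hypothetical application-provided
configuration callback:

.. code-block:: c

    struct rte_event_eth_rx_adapter_params rxa_params;
    int ret;

    memset(&rxa_params, 0, sizeof(rxa_params));
    /* Size (in events) of the adapter's internal event buffer. */
    rxa_params.event_buf_size = 1024;

    ret = rte_event_eth_rx_adapter_create_ext_with_params(id, dev_id,
                                                           app_conf_cb, NULL,
                                                           &rxa_params);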
+ Event device configuration for service based adapter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/guides/prog_guide/eventdev.rst b/doc/guides/prog_guide/eventdev.rst index 2c831768466..9d398d07f7f 100644 --- a/doc/guides/prog_guide/eventdev.rst +++ b/doc/guides/prog_guide/eventdev.rst @@ -317,6 +317,46 @@ can be achieved like this: } int links_made = rte_event_port_link(dev_id, tx_port_id, &single_link_q, &priority, 1); +Linking Queues to Ports with link profiles +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +An application can use link profiles if supported by the underlying event device to setup up +multiple link profile per port and change them run time depending up on heuristic data. +Using Link profiles can reduce the overhead of linking/unlinking and wait for unlinks in progress +in fast-path and gives applications the ability to switch between preset profiles on the fly. + +An example use case could be as follows. + +Config path: + +.. code-block:: c + + uint8_t lq[4] = {4, 5, 6, 7}; + uint8_t hq[4] = {0, 1, 2, 3}; + + if (rte_event_dev_info.max_profiles_per_port < 2) + return -ENOTSUP; + + rte_event_port_profile_links_set(0, 0, hq, NULL, 4, 0); + rte_event_port_profile_links_set(0, 0, lq, NULL, 4, 1); + +Worker path: + +.. code-block:: c + + uint8_t profile_id_to_switch; + + while (1) { + deq = rte_event_dequeue_burst(0, 0, &ev, 1, 0); + if (deq == 0) { + profile_id_to_switch = app_find_profile_id_to_switch(); + rte_event_port_profile_switch(0, 0, profile_id_to_switch); + continue; + } + + // Process the event received. + } + Starting the EventDev ~~~~~~~~~~~~~~~~~~~~~ @@ -333,7 +373,8 @@ eventdev. .. Note:: EventDev needs to be started before starting the event producers such - as event_eth_rx_adapter, event_timer_adapter and event_crypto_adapter. + as event_eth_rx_adapter, event_timer_adapter, event_crypto_adapter and + event_dma_adapter. Ingress of New Events ~~~~~~~~~~~~~~~~~~~~~ @@ -445,8 +486,9 @@ using ``rte_event_dev_stop_flush_callback_register()`` function. .. Note:: The event producers such as ``event_eth_rx_adapter``, - ``event_timer_adapter`` and ``event_crypto_adapter`` - need to be stopped before stopping the event device. + ``event_timer_adapter``, ``event_crypto_adapter`` and + ``event_dma_adapter`` need to be stopped before stopping + the event device. Summary ------- diff --git a/doc/guides/prog_guide/graph_lib.rst b/doc/guides/prog_guide/graph_lib.rst index e7b6e12004e..96cff9ccc7e 100644 --- a/doc/guides/prog_guide/graph_lib.rst +++ b/doc/guides/prog_guide/graph_lib.rst @@ -453,6 +453,14 @@ to determine the L2 header to be written to the packet before sending the packet out to a particular ethdev_tx node. ``rte_node_ip4_rewrite_add()`` is control path API to add next-hop info. +ip4_reassembly +~~~~~~~~~~~~~~ +This node is an intermediate node that reassembles ipv4 fragmented packets, +non-fragmented packets pass through the node un-effected. +The node rewrites its stream and moves it to the next node. +The fragment table and death row table should be setup via the +``rte_node_ip4_reassembly_configure`` API. + ip6_lookup ~~~~~~~~~~ This node is an intermediate node that does LPM lookup for the received @@ -498,3 +506,43 @@ Uses ``poll`` function to poll on the socket fd for ``POLLIN`` events to read the packets from raw socket to stream buffer and does ``rte_node_next_stream_move()`` when there are received packets. 
+
+ip4_local
+~~~~~~~~~
+This node is an intermediate node that does ``packet_type`` lookup for
+the received ipv4 packets and the result determines each packet's next node.
+
+On successful ``packet_type`` lookup, for any IPv4 protocol the result
+contains the ``next_node`` id and ``next-hop`` id with which the packet
+needs to be further processed.
+
+On ``packet_type`` lookup failure, objects are redirected to the ``pkt_drop`` node.
+``rte_node_ip4_route_add()`` is the control path API to add an ipv4 address with
+32 bit depth to receive packets.
+To achieve home run, the node uses ``rte_node_stream_move()`` as mentioned in the
+above sections.
+
+udp4_input
+~~~~~~~~~~
+This node is an intermediate node that does udp destination port lookup for
+the received ipv4 packets and the result determines each packet's next node.
+
+The user registers a new node ``udp4_input`` into the graph library during
+initialization and attaches a user specified node as an edge to this node using
+``rte_node_udp4_usr_node_add()``, and creates an empty hash table with destination
+port and node id as its fields.
+
+After successful addition of the user node as an edge, the edge id is returned to
+the user.
+
+The user would register an ``ip4_lookup`` table entry with the specified ip address
+and a 32 bit mask for ip filtration using the API ``rte_node_ip4_route_add()``.
+
+After the graph is created, the user would update the hash table with the custom
+port and the previously obtained edge id using the API ``rte_node_udp4_dst_port_add()``.
+
+When a packet is received, an LPM lookup is performed; if the ip is matched, the
+packet is handed over to the ``ip4_local`` node, where it is verified for UDP
+protocol and on success enqueued to the ``udp4_input`` node.
+
+A hash lookup is performed in the ``udp4_input`` node with the registered destination
+port and the destination port in the UDP packet; on success the packet is handed
+to ``udp_user_node``.
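A corresponding control-path sketch is shown below. The prototypes assumed here are
``rte_node_udp4_usr_node_add()`` returning the new edge id,
``rte_node_udp4_dst_port_add(dst_port, edge)``, and a
``RTE_NODE_IP4_LOOKUP_NEXT_LOCAL`` next-node value for ``rte_node_ip4_route_add()``;
all three should be verified against the node API headers before use.

.. code-block:: c

        #include <rte_ip.h>
        #include <rte_node_ip4_api.h>
        #include <rte_node_udp4_input_api.h>

        int edge_id;

        /* Attach the application's node as an edge of udp4_input. */
        edge_id = rte_node_udp4_usr_node_add("udp_user_node");
        if (edge_id < 0)
                return edge_id;

        /* /32 route so that matching packets are steered to ip4_local. */
        rte_node_ip4_route_add(RTE_IPV4(10, 0, 0, 1), 32, 0,
                               RTE_NODE_IP4_LOOKUP_NEXT_LOCAL);

        /* Map the UDP destination port to the edge obtained above. */
        rte_node_udp4_dst_port_add(9090, edge_id);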
diff --git a/doc/guides/prog_guide/img/bond-mode-0.svg b/doc/guides/prog_guide/img/bond-mode-0.svg index 850e4d3b629..ae0af226881 100644 --- a/doc/guides/prog_guide/img/bond-mode-0.svg +++ b/doc/guides/prog_guide/img/bond-mode-0.svg @@ -209,7 +209,7 @@ id="shape4-13" transform="translate(6.87402,-7.17304)">Rectangle.16bonded ethdevbonding ethdevbonded ethdev + style="fill:#feffff;font-family:Calibri">bonding ethdev <v:userDefs><v:ud - v:nameU="msvSubprocessMaster" + v:nameU="msvSubprocessMain" v:prompt="" v:val="VT4(Rectangle)" /><v:ud v:nameU="msvNoAutoConnect" @@ -243,7 +243,7 @@ v:groupContext="shape" transform="translate(6.87402,-7.17304)"><title id="title4060">Rectangle.16bonded ethdevbonding ethdevbonded ethdev + v:horizAlign="1" />bonding ethdev Rectangle.16bonded ethdevbonding ethdevbonded ethdev + style="fill:#feffff;font-family:Calibri">bonding ethdev <title id="title5779">Rectangle.16bonded ethdevbonding ethdevbonded ethdev + style="fill:#feffff;font-family:Calibri">bonding ethdev <title id="title6063">Rectangle.16bonded ethdevbonding ethdevbonded ethdev + style="fill:#feffff;font-family:Calibri">bonding ethdev <title id="title6371">Rectangle.16bonded ethdevbonding ethdevbonded ethdev + style="fill:#feffff;font-family:Calibri">bonding ethdev DPDK</text> </g> <g id="shape8-6" transform="translate(12.5358,-7.83661)"> <title>Rectangle.8 - bonded ethdev + bonding ethdev - bonded ethdev + bonding ethdev Rectangle User Application diff --git a/doc/guides/prog_guide/img/event_dma_adapter_op_forward.svg b/doc/guides/prog_guide/img/event_dma_adapter_op_forward.svg new file mode 100644 index 00000000000..b7fe1fecf23 --- /dev/null +++ b/doc/guides/prog_guide/img/event_dma_adapter_op_forward.svg @@ -0,0 +1,1086 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + + + + + + + 1 + + + 2 + + + + 8 + + + + + 7 + + + + + 3 + + + + 4 + + + 5 + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + + + 6 + + + Eventdev + + + DMAAdapter + + + Applicationin orderedstage + + + DMA Device + + + 1. Events from the previous stage. 2. Application in ordered stage dequeues events from eventdev. 3. Application enqueues DMA operations as events to eventdev. 4. DMA adapter dequeues event from eventdev. 5. DMA adapter submits DMA operations to DMA Device (Atomic stage) 6. DMA adapter dequeues DMA completions from DMA Device 7. DMA adapter enqueues events to the eventdev 8. 
Events to the next stage + + + diff --git a/doc/guides/prog_guide/img/event_dma_adapter_op_new.svg b/doc/guides/prog_guide/img/event_dma_adapter_op_new.svg new file mode 100644 index 00000000000..e9e8bb2b987 --- /dev/null +++ b/doc/guides/prog_guide/img/event_dma_adapter_op_new.svg @@ -0,0 +1,1079 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + +   + + + + + + + + + + + + + 1 + + + 2 + + + + + 3 + + + 4 + + + 6 + + + Eventdev + + + Atomic Stage+Enqueue toDMA Device + + + 5 + +   + + DMA Device + + + DMAAdapter + + + 1. Application dequeues events from the previous stage 2. Application prepares the DMA operations. 3. DMA operations are submitted to dmadev by application. 4. DMA adapter dequeues DMA completions from DMA device. 5. DMA adapter enqueues events to the eventdev. 6. Application dequeues from eventdev and prepare for further processing + + + Square + Atomic Queue #1 + + + + + + + + + + + + + + + + Application + + + diff --git a/doc/guides/prog_guide/img/graph_inbuilt_node_flow.svg b/doc/guides/prog_guide/img/graph_inbuilt_node_flow.svg index 7eea94701ff..7c451371a7a 100644 --- a/doc/guides/prog_guide/img/graph_inbuilt_node_flow.svg +++ b/doc/guides/prog_guide/img/graph_inbuilt_node_flow.svg @@ -37,174 +37,229 @@ digraph dpdk_inbuilt_nodes_flow { ethdev_tx -> pkt_drop [color="cyan" style="dashed"] pkt_cls->pkt_drop [color="cyan" style="dashed"] kernel_tx -> kernel_rx [color="red" style="dashed"] + ip4_lookup -> ip4_local + ip4_local -> pkt_drop [color="cyan" style="dashed"] + ip4_local -> udp4_input [ label="udpv4"] + udp4_input -> udp_user_node + udp4_input -> pkt_drop [color="cyan" style="dashed"] + } --> - - + + dpdk_inbuilt_nodes_flow - + ethdev_rx - -ethdev_rx + +ethdev_rx pkt_cls - -pkt_cls + +pkt_cls ethdev_rx->pkt_cls - - + + kernel_rx - -kernel_rx + +kernel_rx kernel_rx->pkt_cls - - + + ethdev_tx - -ethdev_tx + +ethdev_tx pkt_drop - -pkt_drop + +pkt_drop ethdev_tx->pkt_drop - - + + kernel_tx - -kernel_tx + +kernel_tx kernel_tx->kernel_rx - - + + pkt_cls->pkt_drop - - + + pkt_cls->kernel_tx - - -exception pkts + + +exception pkts ip4_lookup - -ip4_lookup + +ip4_lookup pkt_cls->ip4_lookup - - -ipv4 + + +ipv4 ip6_lookup - -ip6_lookup + +ip6_lookup pkt_cls->ip6_lookup - - -ipv6 + + +ipv6 ip4_lookup->pkt_drop - - + + ip4_rewrite - -ip4_rewrite + +ip4_rewrite ip4_lookup->ip4_rewrite - - + + + + + +ip4_local + +ip4_local + + + +ip4_lookup->ip4_local + + ip6_lookup->pkt_drop - - + + ip6_rewrite - -ip6_rewrite + +ip6_rewrite ip6_lookup->ip6_rewrite - - + + ip4_rewrite->ethdev_tx - - + + ip4_rewrite->pkt_drop - - + + ip6_rewrite->ethdev_tx - - + + ip6_rewrite->pkt_drop - - + + + + + +ip4_local->pkt_drop + + + + + +udp4_input + +udp4_input + + + +ip4_local->udp4_input + + +udpv4 + + + +udp4_input->pkt_drop + + + + + +udp_user_node + +udp_user_node + + + +udp4_input->udp_user_node + + diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst index 52a6d9e7aae..e517f0e2592 100644 --- a/doc/guides/prog_guide/index.rst +++ b/doc/guides/prog_guide/index.rst @@ -60,6 +60,8 @@ Programmer's Guide event_ethernet_tx_adapter event_timer_adapter event_crypto_adapter + event_dma_adapter + 
dispatcher_lib qos_framework power_man packet_classif_access_ctrl diff --git a/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst b/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst index 1f66154e356..60717a35876 100644 --- a/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst +++ b/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst @@ -10,20 +10,20 @@ allows physical PMDs to be bonded together to create a single logical PMD. .. figure:: img/bond-overview.* - Bonded PMDs + Bonding PMDs The Link Bonding PMD library(librte_net_bond) supports bonding of groups of ``rte_eth_dev`` ports of the same speed and duplex to provide similar capabilities to that found in Linux bonding driver to allow the aggregation -of multiple (slave) NICs into a single logical interface between a server -and a switch. The new bonded PMD will then process these interfaces based on +of multiple (member) NICs into a single logical interface between a server +and a switch. The new bonding PMD will then process these interfaces based on the mode of operation specified to provide support for features such as redundant links, fault tolerance and/or load balancing. The librte_net_bond library exports a C API which provides an API for the -creation of bonded devices as well as the configuration and management of the -bonded device and its slave devices. +creation of bonding devices as well as the configuration and management of the +bonding device and its member devices. .. note:: @@ -45,7 +45,7 @@ Currently the Link Bonding PMD library supports following modes of operation: This mode provides load balancing and fault tolerance by transmission of - packets in sequential order from the first available slave device through + packets in sequential order from the first available member device through the last. Packets are bulk dequeued from devices then serviced in a round-robin manner. This mode does not guarantee in order reception of packets and down stream should be able to handle out of order packets. @@ -57,10 +57,10 @@ Currently the Link Bonding PMD library supports following modes of operation: Active Backup (Mode 1) - In this mode only one slave in the bond is active at any time, a different - slave becomes active if, and only if, the primary active slave fails, - thereby providing fault tolerance to slave failure. The single logical - bonded interface's MAC address is externally visible on only one NIC (port) + In this mode only one member in the bond is active at any time, a different + member becomes active if, and only if, the primary active member fails, + thereby providing fault tolerance to member failure. The single logical + bonding interface's MAC address is externally visible on only one NIC (port) to avoid confusing the network switch. * **Balance XOR (Mode 2):** @@ -73,10 +73,10 @@ Currently the Link Bonding PMD library supports following modes of operation: This mode provides transmit load balancing (based on the selected transmission policy) and fault tolerance. The default policy (layer2) uses a simple calculation based on the packet flow source and destination MAC - addresses as well as the number of active slaves available to the bonded - device to classify the packet to a specific slave to transmit on. Alternate + addresses as well as the number of active members available to the bonding + device to classify the packet to a specific member to transmit on. 
Alternate transmission policies supported are layer 2+3, this takes the IP source and - destination addresses into the calculation of the transmit slave port and + destination addresses into the calculation of the transmit member port and the final supported policy is layer 3+4, this uses IP source and destination addresses as well as the TCP/UDP source and destination port. @@ -92,7 +92,7 @@ Currently the Link Bonding PMD library supports following modes of operation: Broadcast (Mode 3) - This mode provides fault tolerance by transmission of packets on all slave + This mode provides fault tolerance by transmission of packets on all member ports. * **Link Aggregation 802.3AD (Mode 4):** @@ -114,7 +114,7 @@ Currently the Link Bonding PMD library supports following modes of operation: intervals period of less than 100ms. #. Calls to ``rte_eth_tx_burst`` must have a buffer size of at least 2xN, - where N is the number of slaves. This is a space required for LACP + where N is the number of members. This is a space required for LACP frames. Additionally LACP packets are included in the statistics, but they are not returned to the application. @@ -126,88 +126,88 @@ Currently the Link Bonding PMD library supports following modes of operation: This mode provides an adaptive transmit load balancing. It dynamically - changes the transmitting slave, according to the computed load. Statistics + changes the transmitting member, according to the computed load. Statistics are collected in 100ms intervals and scheduled every 10ms. Implementation Details ---------------------- -The librte_net_bond bonded device are compatible with the Ethernet device API +The librte_net_bond bonding device is compatible with the Ethernet device API exported by the Ethernet PMDs described in the *DPDK API Reference*. -The Link Bonding Library supports the creation of bonded devices at application +The Link Bonding Library supports the creation of bonding devices at application startup time during EAL initialization using the ``--vdev`` option as well as programmatically via the C API ``rte_eth_bond_create`` function. -Bonded devices support the dynamical addition and removal of slave devices using -the ``rte_eth_bond_slave_add`` / ``rte_eth_bond_slave_remove`` APIs. +Bonding devices support the dynamical addition and removal of member devices using +the ``rte_eth_bond_member_add`` / ``rte_eth_bond_member_remove`` APIs. -After a slave device is added to a bonded device slave is stopped using +After a member device is added to a bonding device member is stopped using ``rte_eth_dev_stop`` and then reconfigured using ``rte_eth_dev_configure`` the RX and TX queues are also reconfigured using ``rte_eth_tx_queue_setup`` / ``rte_eth_rx_queue_setup`` with the parameters use to configure the bonding device. If RSS is enabled for bonding device, this mode is also enabled on new -slave and configured as well. +member and configured as well. Any flow which was configured to the bond device also is configured to the added -slave. +member. Setting up multi-queue mode for bonding device to RSS, makes it fully -RSS-capable, so all slaves are synchronized with its configuration. This mode is -intended to provide RSS configuration on slaves transparent for client +RSS-capable, so all members are synchronized with its configuration. This mode is +intended to provide RSS configuration on members transparent for client application implementation. Bonding device stores its own version of RSS settings i.e. 
RETA, RSS hash -function and RSS key, used to set up its slaves. That let to define the meaning +function and RSS key, used to set up its members. That let to define the meaning of RSS configuration of bonding device as desired configuration of whole bonding -(as one unit), without pointing any of slave inside. It is required to ensure +(as one unit), without pointing any of member inside. It is required to ensure consistency and made it more error-proof. RSS hash function set for bonding device, is a maximal set of RSS hash functions -supported by all bonded slaves. RETA size is a GCD of all its RETA's sizes, so -it can be easily used as a pattern providing expected behavior, even if slave -RETAs' sizes are different. If RSS Key is not set for bonded device, it's not -changed on the slaves and default key for device is used. +supported by all bonding members. RETA size is a GCD of all its RETA's sizes, so +it can be easily used as a pattern providing expected behavior, even if member +RETAs' sizes are different. If RSS Key is not set for bonding device, it's not +changed on the members and default key for device is used. -As RSS configurations, there is flow consistency in the bonded slaves for the +As RSS configurations, there is flow consistency in the bonding members for the next rte flow operations: Validate: - - Validate flow for each slave, failure at least for one slave causes to + - Validate flow for each member, failure at least for one member causes to bond validation failure. Create: - - Create the flow in all slaves. - - Save all the slaves created flows objects in bonding internal flow + - Create the flow in all members. + - Save all the members created flows objects in bonding internal flow structure. - - Failure in flow creation for existed slave rejects the flow. - - Failure in flow creation for new slaves in slave adding time rejects - the slave. + - Failure in flow creation for existed member rejects the flow. + - Failure in flow creation for new members in member adding time rejects + the member. Destroy: - - Destroy the flow in all slaves and release the bond internal flow + - Destroy the flow in all members and release the bond internal flow memory. Flush: - - Destroy all the bonding PMD flows in all the slaves. + - Destroy all the bonding PMD flows in all the members. .. note:: - Don't call slaves flush directly, It destroys all the slave flows which + Don't call members flush directly, It destroys all the member flows which may include external flows or the bond internal LACP flow. Query: - - Summarize flow counters from all the slaves, relevant only for + - Summarize flow counters from all the members, relevant only for ``RTE_FLOW_ACTION_TYPE_COUNT``. Isolate: - - Call to flow isolate for all slaves. - - Failure in flow isolation for existed slave rejects the isolate mode. - - Failure in flow isolation for new slaves in slave adding time rejects - the slave. + - Call to flow isolate for all members. + - Failure in flow isolation for existed member rejects the isolate mode. + - Failure in flow isolation for new members in member adding time rejects + the member. All settings are managed through the bonding port API and always are propagated -in one direction (from bonding to slaves). +in one direction (from bonding to members). 
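To make the propagation rule above concrete, the sketch below programs a flow rule
once, on the bonding port only; ``pattern`` and ``actions`` are placeholders for an
application-defined rule.

.. code-block:: c

        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_error error;
        struct rte_flow *flow = NULL;

        /* Validate and create through the bonding port; the bonding PMD
         * replays the rule on every current member and on members added later.
         */
        if (rte_flow_validate(bonding_port_id, &attr, pattern, actions, &error) == 0)
                flow = rte_flow_create(bonding_port_id, &attr, pattern, actions, &error);

        /* Tear down through the bonding port too; never flush a member directly,
         * as that would also destroy the bond's internal LACP flow in mode 4.
         */
        rte_flow_flush(bonding_port_id, &error);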
Link Status Change Interrupts / Polling ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -215,16 +215,16 @@ Link Status Change Interrupts / Polling Link bonding devices support the registration of a link status change callback, using the ``rte_eth_dev_callback_register`` API, this will be called when the status of the bonding device changes. For example in the case of a bonding -device which has 3 slaves, the link status will change to up when one slave -becomes active or change to down when all slaves become inactive. There is no -callback notification when a single slave changes state and the previous -conditions are not met. If a user wishes to monitor individual slaves then they -must register callbacks with that slave directly. +device which has 3 members, the link status will change to up when one member +becomes active or change to down when all members become inactive. There is no +callback notification when a single member changes state and the previous +conditions are not met. If a user wishes to monitor individual members then they +must register callbacks with that member directly. The link bonding library also supports devices which do not implement link status change interrupts, this is achieved by polling the devices link status at a defined period which is set using the ``rte_eth_bond_link_monitoring_set`` -API, the default polling interval is 10ms. When a device is added as a slave to +API, the default polling interval is 10ms. When a device is added as a member to a bonding device it is determined using the ``RTE_PCI_DRV_INTR_LSC`` flag whether the device supports interrupts or whether the link status should be monitored by polling it. @@ -233,31 +233,31 @@ Requirements / Limitations ~~~~~~~~~~~~~~~~~~~~~~~~~~ The current implementation only supports devices that support the same speed -and duplex to be added as a slaves to the same bonded device. The bonded device -inherits these attributes from the first active slave added to the bonded -device and then all further slaves added to the bonded device must support +and duplex to be added as a members to the same bonding device. The bonding device +inherits these attributes from the first active member added to the bonding +device and then all further members added to the bonding device must support these parameters. -A bonding device must have a minimum of one slave before the bonding device +A bonding device must have a minimum of one member before the bonding device itself can be started. To use a bonding device dynamic RSS configuration feature effectively, it is -also required, that all slaves should be RSS-capable and support, at least one +also required, that all members should be RSS-capable and support, at least one common hash function available for each of them. Changing RSS key is only -possible, when all slave devices support the same key size. +possible, when all member devices support the same key size. -To prevent inconsistency on how slaves process packets, once a device is added +To prevent inconsistency on how members process packets, once a device is added to a bonding device, RSS and rte flow configurations should be managed through -the bonding device API, and not directly on the slave. +the bonding device API, and not directly on the member. Like all other PMD, all functions exported by a PMD are lock-free functions that are assumed not to be invoked in parallel on different logical cores to work on the same target object. 
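A short sketch of the callback registration described above, together with tuning
the polling interval for members without link status interrupts, could look as
follows; the callback body is illustrative only.

.. code-block:: c

        static int
        bond_lsc_cb(uint16_t port_id, enum rte_eth_event_type type,
                    void *cb_arg, void *ret_param)
        {
                RTE_SET_USED(cb_arg);
                RTE_SET_USED(ret_param);

                if (type == RTE_ETH_EVENT_INTR_LSC)
                        printf("bonding port %u: link status changed\n", port_id);

                return 0;
        }

        /* After the bonding port has been created: */
        rte_eth_dev_callback_register(bonding_port_id, RTE_ETH_EVENT_INTR_LSC,
                                      bond_lsc_cb, NULL);

        /* Poll members lacking LSC interrupts every 150 ms (default is 10 ms). */
        rte_eth_bond_link_monitoring_set(bonding_port_id, 150);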
It should also be noted that the PMD receive function should not be invoked -directly on a slave devices after they have been to a bonded device since -packets read directly from the slave device will no longer be available to the -bonded device to read. +directly on a member devices after they have been to a bonding device since +packets read directly from the member device will no longer be available to the +bonding device to read. Configuration ~~~~~~~~~~~~~ @@ -265,62 +265,62 @@ Configuration Link bonding devices are created using the ``rte_eth_bond_create`` API which requires a unique device name, the bonding mode, and the socket Id to allocate the bonding device's resources on. -The other configurable parameters for a bonded device are its slave devices, -its primary slave, a user defined MAC address and transmission policy to use if +The other configurable parameters for a bonding device are its member devices, +its primary member, a user defined MAC address and transmission policy to use if the device is in balance XOR mode. -Slave Devices -^^^^^^^^^^^^^ +Member Devices +^^^^^^^^^^^^^^ -Bonding devices support up to a maximum of ``RTE_MAX_ETHPORTS`` slave devices -of the same speed and duplex. Ethernet devices can be added as a slave to a -maximum of one bonded device. Slave devices are reconfigured with the -configuration of the bonded device on being added to a bonded device. +Bonding devices support up to a maximum of ``RTE_MAX_ETHPORTS`` member devices +of the same speed and duplex. Ethernet devices can be added as a member to a +maximum of one bonding device. Member devices are reconfigured with the +configuration of the bonding device on being added to a bonding device. -The bonded also guarantees to return the MAC address of the slave device to its -original value of removal of a slave from it. +The bonding also guarantees to return the MAC address of the member device to its +original value of removal of a member from it. -Primary Slave -^^^^^^^^^^^^^ +Primary Member +^^^^^^^^^^^^^^ -The primary slave is used to define the default port to use when a bonded +The primary member is used to define the default port to use when a bonding device is in active backup mode. A different port will only be used if, and only if, the current primary port goes down. If the user does not specify a -primary port it will default to being the first port added to the bonded device. +primary port it will default to being the first port added to the bonding device. MAC Address ^^^^^^^^^^^ -The bonded device can be configured with a user specified MAC address, this -address will be inherited by the some/all slave devices depending on the +The bonding device can be configured with a user specified MAC address, this +address will be inherited by the some/all member devices depending on the operating mode. If the device is in active backup mode then only the primary -device will have the user specified MAC, all other slaves will retain their -original MAC address. In mode 0, 2, 3, 4 all slaves devices are configure with -the bonded devices MAC address. +device will have the user specified MAC, all other members will retain their +original MAC address. In mode 0, 2, 3, 4 all members devices are configure with +the bonding devices MAC address. -If a user defined MAC address is not defined then the bonded device will -default to using the primary slaves MAC address. +If a user defined MAC address is not defined then the bonding device will +default to using the primary members MAC address. 
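The equivalent of the above configuration through the C API might look roughly like
the sketch below; the member port ids and the chosen mode are examples, and error
handling is trimmed for brevity.

.. code-block:: c

        #include <rte_eth_bond.h>

        int bonding_port_id;

        /* Create the bonding device in active backup mode on socket 0. */
        bonding_port_id = rte_eth_bond_create("net_bonding0",
                                              BONDING_MODE_ACTIVE_BACKUP, 0);
        if (bonding_port_id < 0)
                return bonding_port_id;

        /* Add two member ports and select the primary used while it is up. */
        rte_eth_bond_member_add(bonding_port_id, member_port_0);
        rte_eth_bond_member_add(bonding_port_id, member_port_1);
        rte_eth_bond_primary_set(bonding_port_id, member_port_0);

The bonding port must then be configured, its queues set up and the port started
with the usual ethdev calls before traffic can flow, as described further below.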
Balance XOR Transmit Policies ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -There are 3 supported transmission policies for bonded device running in +There are 3 supported transmission policies for bonding device running in Balance XOR mode. Layer 2, Layer 2+3, Layer 3+4. * **Layer 2:** Ethernet MAC address based balancing is the default transmission policy for Balance XOR bonding mode. It uses a simple XOR calculation on the source MAC address and destination MAC address of the - packet and then calculate the modulus of this value to calculate the slave + packet and then calculate the modulus of this value to calculate the member device to transmit the packet on. * **Layer 2 + 3:** Ethernet MAC address & IP Address based balancing uses a combination of source/destination MAC addresses and the source/destination - IP addresses of the data packet to decide which slave port the packet will + IP addresses of the data packet to decide which member port the packet will be transmitted on. * **Layer 3 + 4:** IP Address & UDP Port based balancing uses a combination of source/destination IP Address and the source/destination UDP ports of - the packet of the data packet to decide which slave port the packet will be + the packet of the data packet to decide which member port the packet will be transmitted on. All these policies support 802.1Q VLAN Ethernet packets, as well as IPv4, IPv6 @@ -350,17 +350,17 @@ device configure API ``rte_eth_dev_configure`` and then the RX and TX queues which will be used must be setup using ``rte_eth_tx_queue_setup`` / ``rte_eth_rx_queue_setup``. -Slave devices can be dynamically added and removed from a link bonding device -using the ``rte_eth_bond_slave_add`` / ``rte_eth_bond_slave_remove`` -APIs but at least one slave device must be added to the link bonding device +Member devices can be dynamically added and removed from a link bonding device +using the ``rte_eth_bond_member_add`` / ``rte_eth_bond_member_remove`` +APIs but at least one member device must be added to the link bonding device before it can be started using ``rte_eth_dev_start``. -The link status of a bonded device is dictated by that of its slaves, if all -slave device link status are down or if all slaves are removed from the link +The link status of a bonding device is dictated by that of its members, if all +member device link status are down or if all members are removed from the link bonding device then the link status of the bonding device will go down. It is also possible to configure / query the configuration of the control -parameters of a bonded device using the provided APIs +parameters of a bonding device using the provided APIs ``rte_eth_bond_mode_set/ get``, ``rte_eth_bond_primary_set/get``, ``rte_eth_bond_mac_set/reset`` and ``rte_eth_bond_xmit_policy_set/get``. @@ -390,9 +390,9 @@ long as the following two rules are respected: where X can be any combination of numbers and/or letters, and the name is no greater than 32 characters long. -* A least one slave device is provided with for each bonded device definition. +* A least one member device is provided with for each bonding device definition. -* The operation mode of the bonded device being created is provided. +* The operation mode of the bonding device being created is provided. The different options are: @@ -404,42 +404,42 @@ The different options are: mode=2 -* slave: Defines the PMD device which will be added as slave to the bonded +* member: Defines the PMD device which will be added as member to the bonding device. 
This option can be selected multiple times, for each device to be - added as a slave. Physical devices should be specified using their PCI + added as a member. Physical devices should be specified using their PCI address, in the format domain:bus:devid.function .. code-block:: console - slave=0000:0a:00.0,slave=0000:0a:00.1 + member=0000:0a:00.0,member=0000:0a:00.1 -* primary: Optional parameter which defines the primary slave port, - is used in active backup mode to select the primary slave for data TX/RX if +* primary: Optional parameter which defines the primary member port, + is used in active backup mode to select the primary member for data TX/RX if it is available. The primary port also is used to select the MAC address to - use when it is not defined by the user. This defaults to the first slave - added to the device if it is specified. The primary device must be a slave - of the bonded device. + use when it is not defined by the user. This defaults to the first member + added to the device if it is specified. The primary device must be a member + of the bonding device. .. code-block:: console primary=0000:0a:00.0 * socket_id: Optional parameter used to select which socket on a NUMA device - the bonded devices resources will be allocated on. + the bonding devices resources will be allocated on. .. code-block:: console socket_id=0 * mac: Optional parameter to select a MAC address for link bonding device, - this overrides the value of the primary slave device. + this overrides the value of the primary member device. .. code-block:: console mac=00:1e:67:1d:fd:1d * xmit_policy: Optional parameter which defines the transmission policy when - the bonded device is in balance mode. If not user specified this defaults + the bonding device is in balance mode. If not user specified this defaults to l2 (layer 2) forwarding, the other transmission policies available are l23 (layer 2+3) and l34 (layer 3+4) @@ -474,29 +474,29 @@ The different options are: Examples of Usage ^^^^^^^^^^^^^^^^^ -Create a bonded device in round robin mode with two slaves specified by their PCI address: +Create a bonding device in round robin mode with two members specified by their PCI address: .. code-block:: console - .//app/dpdk-testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=0,slave=0000:0a:00.01,slave=0000:04:00.00' -- --port-topology=chained + .//app/dpdk-testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=0,member=0000:0a:00.01,member=0000:04:00.00' -- --port-topology=chained -Create a bonded device in round robin mode with two slaves specified by their PCI address and an overriding MAC address: +Create a bonding device in round robin mode with two members specified by their PCI address and an overriding MAC address: .. code-block:: console - .//app/dpdk-testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=0,slave=0000:0a:00.01,slave=0000:04:00.00,mac=00:1e:67:1d:fd:1d' -- --port-topology=chained + .//app/dpdk-testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=0,member=0000:0a:00.01,member=0000:04:00.00,mac=00:1e:67:1d:fd:1d' -- --port-topology=chained -Create a bonded device in active backup mode with two slaves specified, and a primary slave specified by their PCI addresses: +Create a bonding device in active backup mode with two members specified, and a primary member specified by their PCI addresses: .. 
code-block:: console - .//app/dpdk-testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=1,slave=0000:0a:00.01,slave=0000:04:00.00,primary=0000:0a:00.01' -- --port-topology=chained + .//app/dpdk-testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=1,member=0000:0a:00.01,member=0000:04:00.00,primary=0000:0a:00.01' -- --port-topology=chained -Create a bonded device in balance mode with two slaves specified by their PCI addresses, and a transmission policy of layer 3 + 4 forwarding: +Create a bonding device in balance mode with two members specified by their PCI addresses, and a transmission policy of layer 3 + 4 forwarding: .. code-block:: console - .//app/dpdk-testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=2,slave=0000:0a:00.01,slave=0000:04:00.00,xmit_policy=l34' -- --port-topology=chained + .//app/dpdk-testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=2,member=0000:0a:00.01,member=0000:04:00.00,xmit_policy=l34' -- --port-topology=chained .. _bonding_testpmd_commands: @@ -505,40 +505,40 @@ Testpmd driver specific commands Some bonding driver specific features are integrated in testpmd. -create bonded device -~~~~~~~~~~~~~~~~~~~~ +create bonding device +~~~~~~~~~~~~~~~~~~~~~ Create a new bonding device:: - testpmd> create bonded device (mode) (socket) + testpmd> create bonding device (mode) (socket) -For example, to create a bonded device in mode 1 on socket 0:: +For example, to create a bonding device in mode 1 on socket 0:: - testpmd> create bonded device 1 0 - created new bonded device (port X) + testpmd> create bonding device 1 0 + created new bonding device (port X) -add bonding slave -~~~~~~~~~~~~~~~~~ +add bonding member +~~~~~~~~~~~~~~~~~~ Adds Ethernet device to a Link Bonding device:: - testpmd> add bonding slave (slave id) (port id) + testpmd> add bonding member (member id) (port id) For example, to add Ethernet device (port 6) to a Link Bonding device (port 10):: - testpmd> add bonding slave 6 10 + testpmd> add bonding member 6 10 -remove bonding slave -~~~~~~~~~~~~~~~~~~~~ +remove bonding member +~~~~~~~~~~~~~~~~~~~~~ -Removes an Ethernet slave device from a Link Bonding device:: +Removes an Ethernet member device from a Link Bonding device:: - testpmd> remove bonding slave (slave id) (port id) + testpmd> remove bonding member (member id) (port id) -For example, to remove Ethernet slave device (port 6) to a Link Bonding device (port 10):: +For example, to remove Ethernet member device (port 6) to a Link Bonding device (port 10):: - testpmd> remove bonding slave 6 10 + testpmd> remove bonding member 6 10 set bonding mode ~~~~~~~~~~~~~~~~ @@ -554,11 +554,11 @@ For example, to set the bonding mode of a Link Bonding device (port 10) to broad set bonding primary ~~~~~~~~~~~~~~~~~~~ -Set an Ethernet slave device as the primary device on a Link Bonding device:: +Set an Ethernet member device as the primary device on a Link Bonding device:: - testpmd> set bonding primary (slave id) (port id) + testpmd> set bonding primary (member id) (port id) -For example, to set the Ethernet slave device (port 6) as the primary port of a Link Bonding device (port 10):: +For example, to set the Ethernet member device (port 6) as the primary port of a Link Bonding device (port 10):: testpmd> set bonding primary 6 10 @@ -590,13 +590,13 @@ set bonding mon_period Set the link status monitoring polling period in milliseconds for a bonding device. -This adds support for PMD slave devices which do not support link status interrupts. +This adds support for PMD member devices which do not support link status interrupts. 
When the mon_period is set to a value greater than 0 then all PMD's which do not support link status ISR will be queried every polling interval to check if their link status has changed:: testpmd> set bonding mon_period (port_id) (value) -For example, to set the link status monitoring polling period of bonded device (port 5) to 150ms:: +For example, to set the link status monitoring polling period of bonding device (port 5) to 150ms:: testpmd> set bonding mon_period 5 150 @@ -604,7 +604,7 @@ For example, to set the link status monitoring polling period of bonded device ( set bonding lacp dedicated_queue ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Enable dedicated tx/rx queues on bonding devices slaves to handle LACP control plane traffic +Enable dedicated tx/rx queues on bonding devices members to handle LACP control plane traffic when in mode 4 (link-aggregation-802.3ad):: testpmd> set bonding lacp dedicated_queues (port_id) (enable|disable) @@ -627,13 +627,13 @@ it also shows link-aggregation-802.3ad information if the link mode is mode 4:: testpmd> show bonding config (port id) For example, -to show the configuration a Link Bonding device (port 9) with 3 slave devices (1, 3, 4) +to show the configuration a Link Bonding device (port 9) with 3 member devices (1, 3, 4) in balance mode with a transmission policy of layer 2+3:: testpmd> show bonding config 9 - Dev basic: Bonding mode: BALANCE(2) Balance Xmit Policy: BALANCE_XMIT_POLICY_LAYER23 - Slaves (3): [1 3 4] - Active Slaves (3): [1 3 4] + Members (3): [1 3 4] + Active Members (3): [1 3 4] Primary: [3] diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst index 5bc998a4338..b5d4b0e929a 100644 --- a/doc/guides/prog_guide/rte_flow.rst +++ b/doc/guides/prog_guide/rte_flow.rst @@ -1566,6 +1566,13 @@ Matches an InfiniBand base transport header in RoCE packet. - ``hdr``: InfiniBand base transport header definition (``rte_ib.h``). +Item: ``PTYPE`` +^^^^^^^^^^^^^^^ + +Matches the packet type as defined in rte_mbuf_ptype. + +- ``packet_type``: L2/L3/L4 and tunnel information. + Actions ~~~~~~~ @@ -3490,6 +3497,15 @@ The ``quota`` value is reduced according to ``mode`` setting. | ``RTE_FLOW_QUOTA_MODE_L3`` | Count packet bytes starting from L3 | +------------------+----------------------------------------------------+ +Action: ``SEND_TO_KERNEL`` +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Send packets to the kernel, without going to userspace at all. + +The packets will be received by the kernel driver sharing the same device +as the DPDK port on which this action is configured. + + Negative types ~~~~~~~~~~~~~~ @@ -3758,6 +3774,36 @@ Information about the number of available resources can be retrieved via struct rte_flow_queue_info *queue_info, struct rte_flow_error *error); +Group Miss Actions +~~~~~~~~~~~~~~~~~~ + +In an application, many flow rules share common group attributes, meaning they can be grouped and +classified together. A user can explicitly specify a set of actions performed on a packet when it +did not match any flows rules in a group using the following API: + +.. code-block:: c + + int + rte_flow_group_set_miss_actions(uint16_t port_id, + uint32_t group_id, + const struct rte_flow_group_attr *attr, + const struct rte_flow_action actions[], + struct rte_flow_error *error); + +For example, to configure a RTE_FLOW_TYPE_JUMP action as a miss action for ingress group 1: + +.. 
code-block:: c + + struct rte_flow_group_attr attr = {.ingress = 1}; + struct rte_flow_action act[] = { + /* Setting miss actions to jump to group 3 */ + [0] = {.type = RTE_FLOW_ACTION_TYPE_JUMP, + .conf = &(struct rte_flow_action_jump){.group = 3}}, + [1] = {.type = RTE_FLOW_ACTION_TYPE_END}, + }; + struct rte_flow_error err; + rte_flow_group_set_miss_actions(port, 1, &attr, act, &err); + Flow templates ~~~~~~~~~~~~~~ @@ -4134,6 +4180,23 @@ User data may be provided during a flow creation/destruction in order to distinguish between multiple operations. User data is returned as part of the result to provide a method to detect which operation is completed. +Calculate hash +~~~~~~~~~~~~~~ + +Calculating hash of a packet in SW as it would be calculated in HW. + +The application can use this function to calculate the hash of a given packet +as it would be calculated in the HW. + +.. code-block:: c + + int + rte_flow_calc_table_hash(uint16_t port_id, + const struct rte_flow_template_table *table, + const struct rte_flow_item pattern[], + uint8_t pattern_template_index, + uint32_t *hash, struct rte_flow_error *error); + .. _flow_isolated_mode: Flow isolated mode diff --git a/doc/guides/prog_guide/rte_security.rst b/doc/guides/prog_guide/rte_security.rst index 7418e35c1b5..4d45e401396 100644 --- a/doc/guides/prog_guide/rte_security.rst +++ b/doc/guides/prog_guide/rte_security.rst @@ -399,6 +399,82 @@ The API ``rte_security_macsec_sc_create`` returns a handle for SC, and this handle is set in ``rte_security_macsec_xform`` to create a MACsec session using ``rte_security_session_create``. +TLS-Record Protocol +~~~~~~~~~~~~~~~~~~~ + +The Transport Layer Protocol provides communications security over the Internet. +The protocol allows client/server applications to communicate in a way +that is designed to prevent eavesdropping, tampering, or message forgery. + +TLS protocol is composed of two layers: the TLS Record Protocol and the TLS Handshake Protocol. +At the lowest level, layered on top of some reliable transport protocol (e.g., TCP), +is the TLS Record Protocol. +The TLS Record Protocol provides connection security that has two basic properties: + + - The connection is private. + Symmetric cryptography is used for data encryption (e.g., AES, DES, etc.). + The keys for this symmetric encryption are generated uniquely + for each connection and are based on a secret negotiated during TLS Handshake Protocol. + The Record Protocol can also be used without encryption. + + - The connection is reliable. + Message transport includes a message integrity check using a keyed MAC. + Secure hash functions (e.g., SHA-1, etc.) are used for MAC computations. + The Record Protocol can operate without a MAC when it is being used as a transport + for negotiating security parameters by another protocol. + +.. code-block:: c + + Record Write Record Read + ------------ ----------- + + TLSPlaintext TLSCiphertext + | | + ~ ~ + | | + V V + +----------|-----------+ +----------|-----------+ + | Generate sequence no.| | Generate sequence no.| + +----------|-----------+ +----------------------+ + | | AR check (DTLS) | + +----------|-----------+ +----------|-----------+ + | Insert TLS header | | + | & trailer. | +----------|-----------+ + | (including padding) | | Decrypt & MAC verify | + +----------|-----------+ +----------|-----------+ + | | + +---------|-----------+ +----------|-----------+ + | MAC generate & | | Remove TLS header | + | Encrypt | | & trailer. 
| + +---------|-----------+ | (including padding) | + | +----------|-----------+ + | | + ~ ~ + | | + V V + TLSCiphertext TLSPlaintext + +TLS and DTLS header formation (in record write operation) +would depend on type of content. +It is a per packet variable and would need to be handled by the same session. +Application may pass this info to a cryptodev performing lookaside protocol offload +by passing the same in ``rte_crypto_op.param1``. + +In record read operation, application is required to preserve any info +it may need from the TLS/DTLS header (such as content type and sequence number) +as the cryptodev would remove the header and padding +as part of the lookaside protocol processing. +With TLS 1.3, the actual content type is part of the trailer (before padding) +and would be stripped by the PMD. +For applications that may need this info, +PMD would return the value in ``rte_crypto_op.param1`` field. + +Supported Versions +^^^^^^^^^^^^^^^^^^ + +* TLS 1.2 +* TLS 1.3 +* DTLS 1.2 Device Features and Capabilities --------------------------------- @@ -637,7 +713,7 @@ And the session mempool object size should be enough to accommodate Once the session mempools have been created, ``rte_security_session_create()`` is used to allocate and initialize a session for the required crypto/ethernet device. -Session APIs need a parameter ``rte_security_ctx`` to identify the crypto/ethernet +Session APIs need an opaque handle to identify the crypto/ethernet security ops. This parameter can be retrieved using the APIs ``rte_cryptodev_get_sec_ctx()`` (for crypto device) or ``rte_eth_dev_get_sec_ctx`` (for ethernet port). @@ -671,72 +747,27 @@ Security session configuration Security Session configuration structure is defined as ``rte_security_session_conf`` -.. code-block:: c - - struct rte_security_session_conf { - enum rte_security_session_action_type action_type; - /**< Type of action to be performed on the session */ - enum rte_security_session_protocol protocol; - /**< Security protocol to be configured */ - union { - struct rte_security_ipsec_xform ipsec; - struct rte_security_macsec_xform macsec; - struct rte_security_pdcp_xform pdcp; - struct rte_security_docsis_xform docsis; - }; - /**< Configuration parameters for security session */ - struct rte_crypto_sym_xform *crypto_xform; - /**< Security Session Crypto Transformations */ - void *userdata; - /**< Application specific userdata to be saved with session */ - }; +.. literalinclude:: ../../../lib/security/rte_security.h + :language: c + :start-after: Structure rte_security_session_conf 8< + :end-before: >8 End of structure rte_security_session_conf. The configuration structure reuses the ``rte_crypto_sym_xform`` struct for crypto related configuration. The ``rte_security_session_action_type`` struct is used to specify whether the session is configured for Lookaside Protocol offload or Inline Crypto or Inline Protocol Offload. -.. 
code-block:: c - - enum rte_security_session_action_type { - RTE_SECURITY_ACTION_TYPE_NONE, - /**< No security actions */ - RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, - /**< Crypto processing for security protocol is processed inline - * during transmission - */ - RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, - /**< All security protocol processing is performed inline during - * transmission - */ - RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, - /**< All security protocol processing including crypto is performed - * on a lookaside accelerator - */ - RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO - /**< Similar to ACTION_TYPE_NONE but crypto processing for security - * protocol is processed synchronously by a CPU. - */ - }; +.. literalinclude:: ../../../lib/security/rte_security.h + :language: c + :start-after: Enumeration of rte_security_session_action_type 8< + :end-before: >8 End enumeration of rte_security_session_action_type. The ``rte_security_session_protocol`` is defined as -.. code-block:: c - - enum rte_security_session_protocol { - RTE_SECURITY_PROTOCOL_IPSEC = 1, - /**< IPsec Protocol */ - RTE_SECURITY_PROTOCOL_MACSEC, - /**< MACSec Protocol */ - RTE_SECURITY_PROTOCOL_PDCP, - /**< PDCP Protocol */ - RTE_SECURITY_PROTOCOL_DOCSIS, - /**< DOCSIS Protocol */ - }; - -Currently the library defines configuration parameters for IPsec and PDCP only. -For other protocols like MACSec, structures and enums are defined as place holders -which will be updated in the future. +.. literalinclude:: ../../../lib/security/rte_security.h + :language: c + :start-after: Enumeration of rte_security_session_protocol 8< + :end-before: >8 End enumeration of rte_security_session_protocol. IPsec related configuration parameters are defined in ``rte_security_ipsec_xform`` @@ -746,6 +777,8 @@ PDCP related configuration parameters are defined in ``rte_security_pdcp_xform`` DOCSIS related configuration parameters are defined in ``rte_security_docsis_xform`` +TLS record related configuration parameters are defined in ``rte_security_tls_record_xform`` + Security API ~~~~~~~~~~~~ diff --git a/doc/guides/rawdevs/cnxk_bphy.rst b/doc/guides/rawdevs/cnxk_bphy.rst index 2490912534f..dec3e8e17e3 100644 --- a/doc/guides/rawdevs/cnxk_bphy.rst +++ b/doc/guides/rawdevs/cnxk_bphy.rst @@ -19,6 +19,14 @@ The BPHY CGX/RPM implements following features in the rawdev API: - Access to BPHY CGX/RPM via a set of predefined messages - Access to BPHY memory - Custom interrupt handlers +- Multiprocess aware + +Limitations +----------- + +In multiprocess mode user-space application must ensure +no resources sharing takes place. +Otherwise, user-space application should ensure synchronization. Device Setup ------------ diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst index 317875c5054..81b93515cbd 100644 --- a/doc/guides/rel_notes/deprecation.rst +++ b/doc/guides/rel_notes/deprecation.rst @@ -17,9 +17,12 @@ Other API and ABI deprecation notices are to be posted below. Deprecation Notices ------------------- -* build: Enabling deprecated libraries - won't be possible anymore through the use of the ``disable_libs`` build option. - A new build option for deprecated libraries will be introduced instead. +* build: The ``enable_kmods`` option is deprecated and will be removed in a future release. + Setting/clearing the option has no impact on the build. + Instead, kernel modules will be always built for OS's where out-of-tree kernel modules + are required for DPDK operation. 
+ Currently, this means that modules will only be built for FreeBSD. + No modules are shipped with DPDK for either Linux or Windows. * kvargs: The function ``rte_kvargs_process`` will get a new parameter for returning key match count. It will ease handling of no-match case. @@ -33,14 +36,6 @@ Deprecation Notices are renamed to ``rte_tel_data_add_array_uint`` and ``rte_tel_data_add_dict_uint`` respectively. As such, the old function names are deprecated and will be removed in a future release. -* eal: The functions ``rte_thread_setname`` and ``rte_ctrl_thread_create`` - are planned to be deprecated starting with the 23.07 release, subject to - the replacement API rte_thread_set_name and rte_thread_create_control being - marked as stable, and planned to be removed by the 23.11 release. - -* eal: ``RTE_CPUFLAG_NUMFLAGS`` will be removed in DPDK 23.11 release. - This is to allow new CPU features to be added without ABI breakage. - * rte_atomicNN_xxx: These APIs do not take memory order parameter. This does not allow for writing optimized code for all the CPU architectures supported in DPDK. DPDK has adopted the atomic operations from @@ -124,45 +119,11 @@ Deprecation Notices The legacy actions should be removed once ``MODIFY_FIELD`` alternative is implemented in drivers. -* bonding: The macro ``RTE_ETH_DEV_BONDED_SLAVE`` will be - deprecated in DPDK 23.07, and removed in DPDK 23.11. - The relevant code can be updated using ``RTE_ETH_DEV_BONDING_MEMBER``. - The data structure ``struct rte_eth_bond_8023ad_slave_info`` will be - renamed to ``struct rte_eth_bond_8023ad_member_info`` in DPDK 23.11. - The following functions will be removed in DPDK 23.11. - The old functions: - ``rte_eth_bond_8023ad_slave_info``, - ``rte_eth_bond_active_slaves_get``, - ``rte_eth_bond_slave_add``, - ``rte_eth_bond_slave_remove``, and - ``rte_eth_bond_slaves_get`` - will be replaced by: - ``rte_eth_bond_8023ad_member_info``, - ``rte_eth_bond_active_members_get``, - ``rte_eth_bond_member_add``, - ``rte_eth_bond_member_remove``, and - ``rte_eth_bond_members_get``. - * cryptodev: The function ``rte_cryptodev_cb_fn`` will be updated to have another parameter ``qp_id`` to return the queue pair ID which got error interrupt to the application, so that application can reset that particular queue pair. -* cryptodev: The arrays of algorithm strings ``rte_crypto_cipher_algorithm_strings``, - ``rte_crypto_auth_algorithm_strings``, ``rte_crypto_aead_algorithm_strings`` and - ``rte_crypto_asym_xform_strings`` are deprecated and will be removed in DPDK 23.11. - Application can use the new APIs ``rte_cryptodev_get_cipher_algo_string``, - ``rte_cryptodev_get_auth_algo_string``, ``rte_cryptodev_get_aead_algo_string`` and - ``rte_cryptodev_asym_get_xform_string`` respectively. - -* security: Hide structures ``rte_security_ops`` and ``rte_security_ctx`` - as these are internal to DPDK library and drivers. - -* security: New SA option ``ingress_oop`` would be added in structure - ``rte_security_ipsec_sa_options`` to support out of place processing - for inline inbound SA from DPDK 23.11. ``reserved_opts`` field in the - same struct would be removed as discussed in techboard meeting. - * eventdev: The single-event (non-burst) enqueue and dequeue operations, used by static inline burst enqueue and dequeue functions in ``rte_eventdev.h``, will be removed in DPDK 23.11. 
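For code still using the legacy ``rte_atomicNN_xxx`` calls mentioned in the notice
above, the wrapper API introduced in this release (see the 23.11 release notes
below) is the intended replacement. A minimal sketch is shown here, assuming the
``rte_stdatomic.h`` names ``RTE_ATOMIC()``, ``rte_atomic_fetch_add_explicit()`` and
``rte_atomic_load_explicit()``.

.. code-block:: c

        #include <rte_stdatomic.h>

        /* Counter shared between lcores, declared with the wrapper macro so the
         * same code builds against compiler intrinsics (default) or C11 stdatomic
         * (enable_stdatomic=true).
         */
        static RTE_ATOMIC(uint32_t) pkt_count;

        static inline void
        count_packet(void)
        {
                rte_atomic_fetch_add_explicit(&pkt_count, 1,
                                              rte_memory_order_relaxed);
        }

        static inline uint32_t
        read_count(void)
        {
                return rte_atomic_load_explicit(&pkt_count,
                                                rte_memory_order_acquire);
        }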
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst index 4411bb32c19..0a6fc76a9d0 100644 --- a/doc/guides/rel_notes/release_23_11.rst +++ b/doc/guides/rel_notes/release_23_11.rst @@ -20,23 +20,6 @@ DPDK Release 23.11 ninja -C build doc xdg-open build/doc/guides/html/rel_notes/release_23_11.html -* Build Requirements: From DPDK 23.11 onwards, - building DPDK will require a C compiler which supports the C11 standard, - including support for C11 standard atomics. - - More specifically, the requirements will be: - - * Support for flag "-std=c11" (or similar) - * __STDC_NO_ATOMICS__ is *not defined* when using c11 flag - - Please note: - - * C11, including standard atomics, is supported from GCC version 5 onwards, - and is the default language version in that release - (Ref: https://gcc.gnu.org/gcc-5/changes.html) - * C11 is the default compilation mode in Clang from version 3.6, - which also added support for standard atomics - (Ref: https://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html) New Features ------------ @@ -72,6 +55,194 @@ New Features Also, make sure to start the actual text at the margin. ======================================================= +* **Build requirements increased for C11.** + + From DPDK 23.11 onwards, + building DPDK will require a C compiler which supports the C11 standard, + including support for C11 standard atomics. + + More specifically, the requirements will be: + + * Support for flag "-std=c11" (or similar) + * __STDC_NO_ATOMICS__ is *not defined* when using c11 flag + + Please note: + + * C11, including standard atomics, is supported from GCC version 5 onwards, + and is the default language version in that release + (Ref: https://gcc.gnu.org/gcc-5/changes.html) + * C11 is the default compilation mode in Clang from version 3.6, + which also added support for standard atomics + (Ref: https://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html) + +* **Added new build options.** + + * Enabling deprecated libraries is now done using + the new ``enable_deprecated_libraries`` build option. + * Optional libraries can now be selected with the new ``enable_libs`` + build option similarly to the existing ``enable_drivers`` build option. + +* **Introduced a new API for atomic operations.** + + This new API serves as a wrapper for transitioning + to standard atomic operations as described in the C11 standard. + This API implementation points at the compiler intrinsics by default. + The implementation using C11 standard atomic operations is enabled + via the ``enable_stdatomic`` build option. + +* **Added support for power intrinsics with AMD processors.** + +* **Added support for allow/block list in vmbus bus driver.*** + + The ``vmbus`` bus driver now supports -a and -b EAL options for selecting + devices. + +* **Added mbuf recycling support.** + + Added ``rte_eth_recycle_rx_queue_info_get`` and ``rte_eth_recycle_mbufs`` + functions which allow the user to copy used mbufs from the Tx mbuf ring + into the Rx mbuf ring. This feature supports the case that the Rx Ethernet + device is different from the Tx Ethernet device with respective driver + callback functions in ``rte_eth_recycle_mbufs``. + +* **Added amd-pstate driver support to the power management library.** + + Added support for amd-pstate driver which works on AMD EPYC processors. 
+ +* **Added a flow action type for P4-defined actions.** + + For P4-programmable devices, hardware pipeline can be configured through + a new "PROG" action type and its associated custom arguments. + Such P4 pipeline, not using the standard blocks of the flow API, + can be managed with ``RTE_FLOW_ITEM_TYPE_FLEX`` and ``RTE_FLOW_ACTION_TYPE_PROG``. + +* **Added flow group set miss actions.** + + Introduced ``rte_flow_group_set_miss_actions()`` API to explicitly set + a group's miss actions, which are the actions to be performed on packets + that didn't match any of the flow rules in the group. + +* **Updated Intel cpfl driver.** + + * Added support for port representor. + * Added support for flow offload. + +* **Updated Intel iavf driver.** + + * Added support for iavf auto-reset. + +* **Updated Intel i40e driver.** + + * Added support for new X722 devices. + +* **Updated Marvell cnxk net driver.** + + * Added support for ``RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT`` flow item. + * Added support for ``RTE_FLOW_ACTION_TYPE_AGE`` flow action. + +* **Updated NVIDIA mlx5 net driver.** + + * Added support for Network Service Header (NSH) flow matching. + +* **Updated Solarflare net driver.** + + * Added support for transfer flow action ``INDIRECT`` with subtype ``VXLAN_ENCAP``. + * Supported packet replay (multi-count / multi-delivery) in transfer flows. + +* **Updated Netronome/Corigine nfp driver.** + + * Added inline IPsec offload based on the security framework. + +* **Updated Wangxun ngbe driver.** + + * Added 100M and auto-neg support in YT PHY fiber mode. + +* **Added support for TLS and DTLS record processing.** + + Added TLS and DTLS record transform for security session + and added enhancements to ``rte_crypto_op`` fields + to handle all datapath requirements of TLS and DTLS. + The support was added for TLS 1.2, TLS 1.3 and DTLS 1.2. + +* **Added out of place processing support for inline ingress security session.** + + Similar to out of place processing support for lookaside security session, + added the same support for inline ingress security session. + +* **Added security Rx inject API.** + + Added Rx inject API to allow applications to submit packets + for protocol offload and have them injected back to ethdev Rx + so that further ethdev Rx actions (IP reassembly, packet parsing and flow lookups) + can happen based on inner packet. + + The API when implemented by an ethdev, application would be able to process + packets that are received without/failed inline offload processing + (such as fragmented ESP packets with inline IPsec offload). + The API when implemented by a cryptodev, can be used for injecting packets + to ethdev Rx after IPsec processing and take advantage of ethdev Rx actions + for the inner packet which cannot be accelerated in inline protocol offload mode. + +* **Updated cryptodev scheduler driver.** + + * Added support for DOCSIS security protocol + through the ``rte_security`` API callbacks. + +* **Updated ipsec_mb crypto driver.** + + * Added support for digest encrypted to AESNI_MB asynchronous crypto driver. + +* **Updated Intel QuickAssist Technology driver.** + + * Enabled support for QAT 2.0c (4944) devices in QAT crypto driver. + +* **Updated Marvell cnxk crypto driver.** + + * Added SM2 algorithm support in asymmetric crypto operations. + +* **Updated Intel vRAN Boost baseband driver.** + + * Added support for the new Intel vRAN Boost v2 device variant (GNR-D) + within the unified driver. 
+
+* **Added support for models with multiple I/O in mldev library.**
+
+  Added support in mldev library for models with multiple inputs and outputs.
+
+* **Added new eventdev Ethernet Rx adapter create API.**
+
+  Added new function ``rte_event_eth_rx_adapter_create_ext_with_params()``
+  for creating an Rx adapter instance for applications that need to
+  control both the event port allocation and the event buffer size.
+
+* **Added event DMA adapter library.**
+
+  * Added the Event DMA Adapter Library. This library extends the event-based
+    model by introducing APIs that allow applications to enqueue/dequeue DMA
+    operations to/from dmadev as events scheduled by an event device.
+
+* **Added eventdev support to link queues to port with link profile.**
+
+  Introduced event link profiles that can be used to associate links between
+  event queues and an event port with a unique identifier termed a link profile.
+  The profile can be used to switch between the associated links in the fast path
+  without the additional overhead of linking/unlinking and waiting for unlinking.
+
+  * Added ``rte_event_port_profile_links_set``, ``rte_event_port_profile_unlink``,
+    ``rte_event_port_profile_links_get`` and ``rte_event_port_profile_switch``
+    functions to enable this feature.
+
+* **Updated Marvell cnxk eventdev driver.**
+
+  * Added support for the ``remaining_ticks_get`` timer adapter PMD callback
+    to get the remaining ticks to expire for a given event timer.
+  * Added link profiles support; up to two link profiles are supported.
+
+* **Added dispatcher library.**
+
+  Added the dispatcher library, whose purpose is to help decouple different
+  parts (modules) of an eventdev-based application.
 
 
 Removed Items
 -------------
@@ -87,10 +258,25 @@ Removed Items
 
 * eal: Removed deprecated ``RTE_FUNC_PTR_OR_*`` macros.
 
+* ethdev: Removed deprecated macro ``RTE_ETH_DEV_BONDED_SLAVE``.
+
 * flow_classify: Removed flow classification library and examples.
 
 * kni: Removed the Kernel Network Interface (KNI) library and driver.
 
+* cryptodev: Removed the arrays of algorithm strings ``rte_crypto_cipher_algorithm_strings``,
+  ``rte_crypto_auth_algorithm_strings``, ``rte_crypto_aead_algorithm_strings`` and
+  ``rte_crypto_asym_xform_strings``.
+
+* cryptodev: Removed explicit SM2 xform parameter in asymmetric xform.
+
+* security: Removed deprecated field ``reserved_opts``
+  from struct ``rte_security_ipsec_sa_options``.
+
+* mldev: Removed functions ``rte_ml_io_input_size_get`` and ``rte_ml_io_output_size_get``.
+
+* cmdline: Removed broken and unused function ``cmdline_poll``.
+
 
 API Changes
 -----------
@@ -107,6 +293,46 @@ API Changes
 
    Also, make sure to start the actual text at the margin.
    =======================================================
 
+* eal: The thread API has changed.
+  The function ``rte_thread_create_control()`` does not take attributes anymore.
+  The whole thread API was promoted to stable level,
+  except ``rte_thread_setname()`` and ``rte_ctrl_thread_create()`` which are
+  replaced with ``rte_thread_set_name()`` and ``rte_thread_create_control()``.
+
+* eal: Removed ``RTE_CPUFLAG_NUMFLAGS`` to avoid misuse and a theoretical ABI
+  compatibility issue when adding new cpuflags.
+
+* power: Updated the x86 Uncore power management API so that it is vendor agnostic.
+
+* bonding: Replaced master/slave with main/member. The data structure
+  ``struct rte_eth_bond_8023ad_slave_info`` was renamed to
+  ``struct rte_eth_bond_8023ad_member_info`` in DPDK 23.11.
+  The following functions were removed in DPDK 23.11.
+ The old functions: + ``rte_eth_bond_8023ad_slave_info``, + ``rte_eth_bond_active_slaves_get``, + ``rte_eth_bond_slave_add``, + ``rte_eth_bond_slave_remove``, and + ``rte_eth_bond_slaves_get`` + will be replaced by: + ``rte_eth_bond_8023ad_member_info``, + ``rte_eth_bond_active_members_get``, + ``rte_eth_bond_member_add``, + ``rte_eth_bond_member_remove``, and + ``rte_eth_bond_members_get``. + +* cryptodev: The elliptic curve asymmetric private and public keys can be maintained + per session. These keys are moved from per packet ``rte_crypto_ecdsa_op_param`` and + ``rte_crypto_sm2_op_param`` to generic EC xform ``rte_crypto_ec_xform``. + +* security: Structures ``rte_security_ops`` and ``rte_security_ctx`` were moved to + internal library headers not visible to application. + +* mldev: Updated the structure ``rte_ml_model_info`` to support input and output + with arbitrary shapes. + Updated ``rte_ml_op``, ``rte_ml_io_quantize`` and ``rte_ml_io_dequantize`` + to support an array of ``rte_ml_buff_seg``. + ABI Changes ----------- @@ -123,6 +349,17 @@ ABI Changes Also, make sure to start the actual text at the margin. ======================================================= +* ethdev: Added ``recycle_tx_mbufs_reuse`` and ``recycle_rx_descriptors_refill`` + fields to ``rte_eth_dev`` structure. + +* ethdev: Structure ``rte_eth_fp_ops`` was affected to add + ``recycle_tx_mbufs_reuse`` and ``recycle_rx_descriptors_refill`` + fields, to move ``rxq`` and ``txq`` fields, to change the size of + ``reserved1`` and ``reserved2`` fields. + +* security: struct ``rte_security_ipsec_sa_options`` was updated + due to inline out-of-place feature addition. + Known Issues ------------ diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst index 6e9c552e769..24a086401ee 100644 --- a/doc/guides/testpmd_app_ug/run_app.rst +++ b/doc/guides/testpmd_app_ug/run_app.rst @@ -232,6 +232,7 @@ The command line options are: noisy 5tswap shared-rxq + recycle_mbufs * ``--rss-ip`` diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst index a182479ab27..ff73d65a329 100644 --- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst +++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst @@ -318,7 +318,7 @@ set fwd Set the packet forwarding mode:: testpmd> set fwd (io|mac|macswap|flowgen| \ - rxonly|txonly|csum|icmpecho|noisy|5tswap|shared-rxq) (""|retry) + rxonly|txonly|csum|icmpecho|noisy|5tswap|shared-rxq|recycle_mbufs) (""|retry) ``retry`` can be specified for forwarding engines except ``rx_only``. @@ -364,6 +364,9 @@ The available information categories are: * ``shared-rxq``: Receive only for shared Rx queue. Resolve packet source port from mbuf and update stream statistics accordingly. +* ``recycle_mbufs``: Recycle Tx queue used mbufs for Rx queue mbuf ring. + This mode uses fast path mbuf recycle feature and forwards packets in I/O mode. + Example:: testpmd> set fwd rxonly @@ -1320,6 +1323,13 @@ filtered by port:: testpmd> mcast_addr remove (port_id) (mcast_addr) +mcast_addr flush +~~~~~~~~~~~~~~~~ + +Flush all multicast MAC addresses on port_id:: + + testpmd> mcast_addr flush (port_id) + mac_addr add (for VF) ~~~~~~~~~~~~~~~~~~~~~ @@ -1907,7 +1917,7 @@ For example, to attach a port created by pcap PMD. In this case, identifier is ``net_pcap0``. This identifier format is the same as ``--vdev`` format of DPDK applications. 
-For example, to re-attach a bonded port which has been previously detached, +For example, to re-attach a bonding port which has been previously detached, the mode and slave parameters must be given. .. code-block:: console @@ -1915,7 +1925,7 @@ the mode and slave parameters must be given. testpmd> port attach net_bond_0,mode=0,slave=1 Attaching a new port... EAL: Initializing pmd_bond for net_bond_0 - EAL: Create bonded device net_bond_0 on port 0 in mode 0 on socket 0. + EAL: Create bonding device net_bond_0 on port 0 in mode 0 on socket 0. Port 0 is attached. Now total ports is 1 Done @@ -3273,6 +3283,28 @@ The usual error message is shown when operations results cannot be pulled:: Caught error type [...] ([...]): [...] +Calculating hash +~~~~~~~~~~~~~~~~ + +``flow hash`` calculates the hash for a given pattern. +It is bound to ``rte_flow_calc_table_hash()``:: + + flow hash {port_id} template_table {table_id} + pattern_template {pattern_template_index} + actions_template {actions_template_index} + pattern {item} [/ {item} [...]] / end + +If successful, it will show the calculated hash result as seen below:: + + Hash results 0x[...] + +Otherwise, it will show an error message of the form:: + + Caught error type [...] ([...]): [...] + +This command uses the same pattern items as ``flow create``, +their format is described in `Creating flow rules`_. + Creating a tunnel stub for offload ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -3805,6 +3837,10 @@ This section lists supported pattern items and their attributes, if any. - ``send_to_kernel``: send packets to kernel. +- ``ptype``: match the packet type (L2/L3/L4 and tunnel information). + + - ``packet_type {unsigned}``: packet type. + Actions list ^^^^^^^^^^^^ @@ -4094,6 +4130,27 @@ This section lists supported actions and their attributes, if any. - ``mtr_init_color {value}``: initial color value (green/yellow/red) - ``mtr_state {unsigned}``: meter state (disabled/enabled) +- ``modify_field``: Modify packet field + + - ``op``: modify operation (set/add/sub) + - ``dst_type``: the destination field to be modified, the supported fields as + ``enum rte_flow_field_id`` listed. + - ``dst_level``: destination field level. + - ``dst_tag_index``: destination field tag array. + - ``dst_type_id``: destination field type ID. + - ``dst_class``: destination field class ID. + - ``dst_offset``: destination field bit offset. + - ``src_type``: the modify source field, the supported fields as + ``enum rte_flow_field_id`` listed. + - ``src_level``: source field level. + - ``src_tag_index``: source field tag array. + - ``src_type_id``: source field type ID. + - ``src_class``: source field class ID. + - ``src_offset``: source field bit offset. + - ``src_value``: source immediate value. + - ``src_ptr``: pointer to source immediate value. + - ``width``: number of bits to copy. + Destroying flow rules ~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/guides/tools/dts.rst b/doc/guides/tools/dts.rst index 2f97d1df6e3..32c18ee4729 100644 --- a/doc/guides/tools/dts.rst +++ b/doc/guides/tools/dts.rst @@ -82,6 +82,7 @@ Setting up DTS environment Another benefit is the usage of ``pyproject.toml``, which has become the standard config file for python projects, improving project organization. To install Poetry, visit their `doc pages `_. + The recommended Poetry version is at least 1.5.1. #. 
**Getting a Poetry shell** diff --git a/doc/guides/tools/testmldev.rst b/doc/guides/tools/testmldev.rst index 741abd722e2..9b1565a4576 100644 --- a/doc/guides/tools/testmldev.rst +++ b/doc/guides/tools/testmldev.rst @@ -106,11 +106,6 @@ The following are the command-line options supported by the test application. Queue size would translate into ``rte_ml_dev_qp_conf::nb_desc`` field during queue-pair creation. Default value is ``1``. -``--batches `` - Set the number batches in the input file provided for inference run. - When not specified, the test would assume the number of batches - is the batch size of the model. - ``--tolerance `` Set the tolerance value in percentage to be used for output validation. Default value is ``0``. @@ -282,7 +277,6 @@ Supported command line options for inference tests are following:: --burst_size --queue_pairs --queue_size - --batches --tolerance --stats diff --git a/drivers/baseband/acc/acc100_pmd.h b/drivers/baseband/acc/acc100_pmd.h index a48298650c2..5a8965fa539 100644 --- a/drivers/baseband/acc/acc100_pmd.h +++ b/drivers/baseband/acc/acc100_pmd.h @@ -34,6 +34,8 @@ #define ACC100_VENDOR_ID (0x8086) #define ACC100_PF_DEVICE_ID (0x0d5c) #define ACC100_VF_DEVICE_ID (0x0d5d) +#define VRB1_PF_DEVICE_ID (0x57C0) +#define VRB2_PF_DEVICE_ID (0x57C2) /* Values used in writing to the registers */ #define ACC100_REG_IRQ_EN_ALL 0x1FF83FF /* Enable all interrupts */ diff --git a/drivers/baseband/acc/acc_common.h b/drivers/baseband/acc/acc_common.h index 5bb00746c3c..bda2ad2f7a7 100644 --- a/drivers/baseband/acc/acc_common.h +++ b/drivers/baseband/acc/acc_common.h @@ -18,6 +18,7 @@ #define ACC_DMA_BLKID_OUT_HARQ 3 #define ACC_DMA_BLKID_IN_HARQ 3 #define ACC_DMA_BLKID_IN_MLD_R 3 +#define ACC_DMA_BLKID_DEWIN_IN 3 /* Values used in filling in decode FCWs */ #define ACC_FCW_TD_VER 1 @@ -87,6 +88,7 @@ #define ACC_FCW_LE_BLEN 32 #define ACC_FCW_LD_BLEN 36 #define ACC_FCW_FFT_BLEN 28 +#define ACC_FCW_MLDTS_BLEN 32 #define ACC_5GUL_SIZE_0 16 #define ACC_5GUL_SIZE_1 40 #define ACC_5GUL_OFFSET_0 36 @@ -101,6 +103,10 @@ #define ACC_NUM_QGRPS_PER_WORD 8 #define ACC_MAX_NUM_QGRPS 32 #define ACC_RING_SIZE_GRANULARITY 64 +#define ACC_MAX_FCW_SIZE 128 +#define ACC_IQ_SIZE 4 + +#define ACC_FCW_FFT_BLEN_3 28 /* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */ #define ACC_N_ZC_1 66 /* N = 66 Zc for BG 1 */ @@ -130,6 +136,19 @@ #define ACC_LIM_21 14 /* 0.21 */ #define ACC_LIM_31 20 /* 0.31 */ #define ACC_MAX_E (128 * 1024 - 2) +#define ACC_MAX_CS 12 + +#define ACC100_VARIANT 0 +#define VRB1_VARIANT 2 +#define VRB2_VARIANT 3 + +/* Queue Index Hierarchy */ +#define VRB1_GRP_ID_SHIFT 10 +#define VRB1_VF_ID_SHIFT 4 +#define VRB2_GRP_ID_SHIFT 12 +#define VRB2_VF_ID_SHIFT 6 + +#define ACC_MAX_FFT_WIN 16 /* Helper macro for logging */ #define rte_acc_log(level, fmt, ...) \ @@ -330,6 +349,37 @@ struct __rte_packed acc_fcw_fft { res:19; }; +/* FFT Frame Control Word. 
*/ +struct __rte_packed acc_fcw_fft_3 { + uint32_t in_frame_size:16, + leading_pad_size:16; + uint32_t out_frame_size:16, + leading_depad_size:16; + uint32_t cs_window_sel; + uint32_t cs_window_sel2:16, + cs_enable_bmap:16; + uint32_t num_antennas:8, + idft_size:8, + dft_size:8, + cs_offset:8; + uint32_t idft_shift:8, + dft_shift:8, + cs_multiplier:16; + uint32_t bypass:2, + fp16_in:1, + fp16_out:1, + exp_adj:4, + power_shift:4, + power_en:1, + enable_dewin:1, + freq_resample_mode:2, + depad_output_size:16; + uint16_t cs_theta_0[ACC_MAX_CS]; + uint32_t cs_theta_d[ACC_MAX_CS]; + int8_t cs_time_offset[ACC_MAX_CS]; +}; + + /* MLD-TS Frame Control Word */ struct __rte_packed acc_fcw_mldts { uint32_t fcw_version:4, @@ -471,14 +521,14 @@ union acc_info_ring_data { uint16_t valid: 1; }; struct { - uint32_t aq_id_3: 6; - uint32_t qg_id_3: 5; - uint32_t vf_id_3: 6; - uint32_t int_nb_3: 6; - uint32_t msi_0_3: 1; - uint32_t vf2pf_3: 6; - uint32_t loop_3: 1; - uint32_t valid_3: 1; + uint32_t aq_id_vrb2: 6; + uint32_t qg_id_vrb2: 5; + uint32_t vf_id_vrb2: 6; + uint32_t int_nb_vrb2: 6; + uint32_t msi_0_vrb2: 1; + uint32_t vf2pf_vrb2: 6; + uint32_t loop_vrb2: 1; + uint32_t valid_vrb2: 1; }; } __rte_packed; @@ -512,6 +562,8 @@ struct acc_deq_intr_details { enum { ACC_VF2PF_STATUS_REQUEST = 1, ACC_VF2PF_USING_VF = 2, + ACC_VF2PF_LUT_VER_REQUEST = 3, + ACC_VF2PF_FFT_WIN_REQUEST = 4, }; @@ -558,6 +610,7 @@ struct acc_device { queue_offset_fun_t queue_offset; /* Device specific queue offset */ uint16_t num_qgroups; uint16_t num_aqs; + uint16_t fft_window_width[ACC_MAX_FFT_WIN]; /* FFT windowing size. */ }; /* Structure associated with each queue. */ @@ -581,13 +634,14 @@ struct __rte_cache_aligned acc_queue { uint32_t aq_enqueued; /* Count how many "batches" have been enqueued */ uint32_t aq_dequeued; /* Count how many "batches" have been dequeued */ uint32_t irq_enable; /* Enable ops dequeue interrupts if set to 1 */ - struct rte_mempool *fcw_mempool; /* FCW mempool */ enum rte_bbdev_op_type op_type; /* Type of this Queue: TE or TD */ /* Internal Buffers for loopback input */ uint8_t *lb_in; uint8_t *lb_out; + uint8_t *fcw_ring; rte_iova_t lb_in_addr_iova; rte_iova_t lb_out_addr_iova; + rte_iova_t fcw_ring_addr_iova; int8_t *derm_buffer; /* interim buffer for de-rm in SDK */ struct acc_device *d; }; @@ -755,22 +809,114 @@ alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc_device *d, free_base_addresses(base_addrs, i); } +/* Wrapper to provide VF index from ring data. */ +static inline uint16_t +vf_from_ring(const union acc_info_ring_data ring_data, uint16_t device_variant) +{ + if (device_variant == VRB2_VARIANT) + return ring_data.vf_id_vrb2; + else + return ring_data.vf_id; +} + +/* Wrapper to provide QG index from ring data. */ +static inline uint16_t +qg_from_ring(const union acc_info_ring_data ring_data, uint16_t device_variant) +{ + if (device_variant == VRB2_VARIANT) + return ring_data.qg_id_vrb2; + else + return ring_data.qg_id; +} + +/* Wrapper to provide AQ index from ring data. */ +static inline uint16_t +aq_from_ring(const union acc_info_ring_data ring_data, uint16_t device_variant) +{ + if (device_variant == VRB2_VARIANT) + return ring_data.aq_id_vrb2; + else + return ring_data.aq_id; +} + +/* Wrapper to provide int index from ring data. 
*/ +static inline uint16_t +int_from_ring(const union acc_info_ring_data ring_data, uint16_t device_variant) +{ + if (device_variant == VRB2_VARIANT) + return ring_data.int_nb_vrb2; + else + return ring_data.int_nb; +} + +/* Wrapper to provide queue index from group and aq index. */ +static inline int +queue_index(uint16_t group_idx, uint16_t aq_idx, uint16_t device_variant) +{ + if (device_variant == VRB2_VARIANT) + return (group_idx << VRB2_GRP_ID_SHIFT) + aq_idx; + else + return (group_idx << VRB1_GRP_ID_SHIFT) + aq_idx; +} + +/* Wrapper to provide queue group from queue index. */ +static inline int +qg_from_q(uint32_t q_idx, uint16_t device_variant) +{ + if (device_variant == VRB2_VARIANT) + return (q_idx >> VRB2_GRP_ID_SHIFT) & 0x1F; + else + return (q_idx >> VRB1_GRP_ID_SHIFT) & 0xF; +} + +/* Wrapper to provide vf from queue index. */ +static inline int32_t +vf_from_q(uint32_t q_idx, uint16_t device_variant) +{ + if (device_variant == VRB2_VARIANT) + return (q_idx >> VRB2_VF_ID_SHIFT) & 0x3F; + else + return (q_idx >> VRB1_VF_ID_SHIFT) & 0x3F; +} + +/* Wrapper to provide aq index from queue index. */ +static inline int32_t +aq_from_q(uint32_t q_idx, uint16_t device_variant) +{ + if (device_variant == VRB2_VARIANT) + return q_idx & 0x3F; + else + return q_idx & 0xF; +} + +/* Wrapper to set VF index in ring data. */ +static inline int32_t +set_vf_in_ring(volatile union acc_info_ring_data *ring_data, + uint16_t device_variant, uint16_t value) +{ + if (device_variant == VRB2_VARIANT) + return ring_data->vf_id_vrb2 = value; + else + return ring_data->vf_id = value; +} + /* * Find queue_id of a device queue based on details from the Info Ring. * If a queue isn't found UINT16_MAX is returned. */ static inline uint16_t -get_queue_id_from_ring_info(struct rte_bbdev_data *data, - const union acc_info_ring_data ring_data) +get_queue_id_from_ring_info(struct rte_bbdev_data *data, const union acc_info_ring_data ring_data) { uint16_t queue_id; + struct acc_queue *acc_q; + struct acc_device *d = data->dev_private; for (queue_id = 0; queue_id < data->num_queues; ++queue_id) { - struct acc_queue *acc_q = - data->queues[queue_id].queue_private; - if (acc_q != NULL && acc_q->aq_id == ring_data.aq_id && - acc_q->qgrp_id == ring_data.qg_id && - acc_q->vf_id == ring_data.vf_id) + acc_q = data->queues[queue_id].queue_private; + + if (acc_q != NULL && acc_q->aq_id == aq_from_ring(ring_data, d->device_variant) && + acc_q->qgrp_id == qg_from_ring(ring_data, d->device_variant) && + acc_q->vf_id == vf_from_ring(ring_data, d->device_variant)) return queue_id; } @@ -1432,4 +1578,11 @@ get_num_cbs_in_tb_ldpc_enc(struct rte_bbdev_op_ldpc_enc *ldpc_enc) return cbs_in_tb; } +static inline void +acc_reg_fast_write(struct acc_device *d, uint32_t offset, uint32_t value) +{ + void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset); + mmio_write(reg_addr, value); +} + #endif /* _ACC_COMMON_H_ */ diff --git a/drivers/baseband/acc/meson.build b/drivers/baseband/acc/meson.build index 966cd65c799..27a654b5015 100644 --- a/drivers/baseband/acc/meson.build +++ b/drivers/baseband/acc/meson.build @@ -1,7 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2020 Intel Corporation -deps += ['bbdev', 'bus_pci'] +deps += ['bus_pci'] sources = files('rte_acc100_pmd.c', 'rte_vrb_pmd.c') diff --git a/drivers/baseband/acc/rte_acc100_pmd.c b/drivers/baseband/acc/rte_acc100_pmd.c index 5362d39c302..8a9c296b016 100644 --- a/drivers/baseband/acc/rte_acc100_pmd.c +++ b/drivers/baseband/acc/rte_acc100_pmd.c @@ -1218,7 +1218,7 @@ 
acc100_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw, - op->ldpc_dec.n_filler); /* Alignment on next 64B - Already enforced from HC output */ - harq_in_length = RTE_ALIGN_FLOOR(harq_in_length, ACC_HARQ_ALIGN_64B); + harq_in_length = RTE_ALIGN_CEIL(harq_in_length, ACC_HARQ_ALIGN_64B); /* Stronger alignment requirement when in decompression mode */ if (fcw->hcin_decomp_mode > 0) @@ -5187,6 +5187,10 @@ rte_acc_configure(const char *dev_name, struct rte_acc_conf *conf) return acc100_configure(dev_name, conf); else if (pci_dev->id.device_id == ACC101_PF_DEVICE_ID) return acc101_configure(dev_name, conf); - else + else if (pci_dev->id.device_id == VRB1_PF_DEVICE_ID) return vrb1_configure(dev_name, conf); + else if (pci_dev->id.device_id == VRB2_PF_DEVICE_ID) + return vrb2_configure(dev_name, conf); + + return -ENXIO; } diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c index 9e5a73c9c77..ae230b828a0 100644 --- a/drivers/baseband/acc/rte_vrb_pmd.c +++ b/drivers/baseband/acc/rte_vrb_pmd.c @@ -37,7 +37,16 @@ vrb1_queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id return ((qgrp_id << 7) + (aq_id << 3) + VRB1_VfQmgrIngressAq); } -enum {UL_4G = 0, UL_5G, DL_4G, DL_5G, FFT, NUM_ACC}; +static inline uint32_t +vrb2_queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id) +{ + if (pf_device) + return ((vf_id << 14) + (qgrp_id << 9) + (aq_id << 3) + VRB2_PfQmgrIngressAq); + else + return ((qgrp_id << 9) + (aq_id << 3) + VRB2_VfQmgrIngressAq); +} + +enum {UL_4G = 0, UL_5G, DL_4G, DL_5G, FFT, MLD, NUM_ACC}; /* Return the accelerator enum for a Queue Group Index. */ static inline int @@ -53,6 +62,7 @@ accFromQgid(int qg_idx, const struct rte_acc_conf *acc_conf) NumQGroupsPerFn[DL_4G] = acc_conf->q_dl_4g.num_qgroups; NumQGroupsPerFn[DL_5G] = acc_conf->q_dl_5g.num_qgroups; NumQGroupsPerFn[FFT] = acc_conf->q_fft.num_qgroups; + NumQGroupsPerFn[MLD] = acc_conf->q_mld.num_qgroups; for (acc = UL_4G; acc < NUM_ACC; acc++) for (qgIdx = 0; qgIdx < NumQGroupsPerFn[acc]; qgIdx++) accQg[qgIndex++] = acc; @@ -83,6 +93,9 @@ qtopFromAcc(struct rte_acc_queue_topology **qtop, int acc_enum, struct rte_acc_c case FFT: p_qtop = &(acc_conf->q_fft); break; + case MLD: + p_qtop = &(acc_conf->q_mld); + break; default: /* NOTREACHED. */ rte_bbdev_log(ERR, "Unexpected error evaluating %s using %d", __func__, acc_enum); @@ -139,6 +152,9 @@ initQTop(struct rte_acc_conf *acc_conf) acc_conf->q_fft.num_aqs_per_groups = 0; acc_conf->q_fft.num_qgroups = 0; acc_conf->q_fft.first_qgroup_index = -1; + acc_conf->q_mld.num_aqs_per_groups = 0; + acc_conf->q_mld.num_qgroups = 0; + acc_conf->q_mld.first_qgroup_index = -1; } static inline void @@ -183,6 +199,37 @@ vrb_check_device_enable(struct rte_bbdev *dev) return false; } +static inline void +vrb_vf2pf(struct acc_device *d, unsigned int payload) +{ + acc_reg_write(d, d->reg_addr->vf2pf_doorbell, payload); +} + +/* Request device FFT windowing information. */ +static inline void +vrb_device_fft_win(struct rte_bbdev *dev) +{ + struct acc_device *d = dev->data->dev_private; + uint32_t reg, time_out = 0, win; + + if (d->pf_device) + return; + + /* Check from the device the first time. */ + if (d->fft_window_width[0] == 0) { + for (win = 0; win < ACC_MAX_FFT_WIN; win++) { + vrb_vf2pf(d, ACC_VF2PF_FFT_WIN_REQUEST | win); + reg = acc_reg_read(d, d->reg_addr->pf2vf_doorbell); + while ((time_out < ACC_STATUS_TO) && (reg == RTE_BBDEV_DEV_NOSTATUS)) { + usleep(ACC_STATUS_WAIT); /*< Wait or VF->PF->VF Comms. 
*/ + reg = acc_reg_read(d, d->reg_addr->pf2vf_doorbell); + time_out++; + } + d->fft_window_width[win] = reg; + } + } +} + /* Fetch configuration enabled for the PF/VF using MMIO Read (slow). */ static inline void fetch_acc_config(struct rte_bbdev *dev) @@ -190,7 +237,7 @@ fetch_acc_config(struct rte_bbdev *dev) struct acc_device *d = dev->data->dev_private; struct rte_acc_conf *acc_conf = &d->acc_conf; uint8_t acc, qg; - uint32_t reg_aq, reg_len0, reg_len1, reg0, reg1; + uint32_t reg_aq, reg_len0, reg_len1, reg_len2, reg_len3, reg0, reg1, reg2, reg3; uint32_t reg_mode, idx; struct rte_acc_queue_topology *q_top = NULL; int qman_func_id[VRB_NUM_ACCS] = {ACC_ACCMAP_0, ACC_ACCMAP_1, @@ -206,38 +253,89 @@ fetch_acc_config(struct rte_bbdev *dev) return; } + vrb_device_fft_win(dev); + d->ddr_size = 0; /* Single VF Bundle by VF. */ acc_conf->num_vf_bundles = 1; initQTop(acc_conf); - reg0 = acc_reg_read(d, d->reg_addr->qman_group_func); - reg1 = acc_reg_read(d, d->reg_addr->qman_group_func + 4); - for (qg = 0; qg < d->num_qgroups; qg++) { - reg_aq = acc_reg_read(d, d->queue_offset(d->pf_device, 0, qg, 0)); - if (reg_aq & ACC_QUEUE_ENABLE) { - if (qg < ACC_NUM_QGRPS_PER_WORD) - idx = (reg0 >> (qg * 4)) & 0x7; + if (d->device_variant == VRB1_VARIANT) { + reg0 = acc_reg_read(d, d->reg_addr->qman_group_func); + reg1 = acc_reg_read(d, d->reg_addr->qman_group_func + 4); + for (qg = 0; qg < d->num_qgroups; qg++) { + reg_aq = acc_reg_read(d, d->queue_offset(d->pf_device, 0, qg, 0)); + if (reg_aq & ACC_QUEUE_ENABLE) { + if (qg < ACC_NUM_QGRPS_PER_WORD) + idx = (reg0 >> (qg * 4)) & 0x7; + else + idx = (reg1 >> ((qg - ACC_NUM_QGRPS_PER_WORD) * 4)) & 0x7; + if (idx < VRB1_NUM_ACCS) { + acc = qman_func_id[idx]; + updateQtop(acc, qg, acc_conf, d); + } + } + } + + /* Check the depth of the AQs. */ + reg_len0 = acc_reg_read(d, d->reg_addr->depth_log0_offset); + reg_len1 = acc_reg_read(d, d->reg_addr->depth_log1_offset); + for (acc = 0; acc < NUM_ACC; acc++) { + qtopFromAcc(&q_top, acc, acc_conf); + if (q_top->first_qgroup_index < ACC_NUM_QGRPS_PER_WORD) + q_top->aq_depth_log2 = + (reg_len0 >> (q_top->first_qgroup_index * 4)) & 0xF; else - idx = (reg1 >> ((qg - ACC_NUM_QGRPS_PER_WORD) * 4)) & 0x7; - if (idx < VRB1_NUM_ACCS) { - acc = qman_func_id[idx]; - updateQtop(acc, qg, acc_conf, d); + q_top->aq_depth_log2 = (reg_len1 >> ((q_top->first_qgroup_index - + ACC_NUM_QGRPS_PER_WORD) * 4)) & 0xF; + } + } else { + reg0 = acc_reg_read(d, d->reg_addr->qman_group_func); + reg1 = acc_reg_read(d, d->reg_addr->qman_group_func + 4); + reg2 = acc_reg_read(d, d->reg_addr->qman_group_func + 8); + reg3 = acc_reg_read(d, d->reg_addr->qman_group_func + 12); + /* printf("Debug Function %08x %08x %08x %08x\n", reg0, reg1, reg2, reg3);*/ + for (qg = 0; qg < VRB2_NUM_QGRPS; qg++) { + reg_aq = acc_reg_read(d, vrb2_queue_offset(d->pf_device, 0, qg, 0)); + if (reg_aq & ACC_QUEUE_ENABLE) { + /* printf("Qg enabled %d %x\n", qg, reg_aq);*/ + if (qg / ACC_NUM_QGRPS_PER_WORD == 0) + idx = (reg0 >> ((qg % ACC_NUM_QGRPS_PER_WORD) * 4)) & 0x7; + else if (qg / ACC_NUM_QGRPS_PER_WORD == 1) + idx = (reg1 >> ((qg % ACC_NUM_QGRPS_PER_WORD) * 4)) & 0x7; + else if (qg / ACC_NUM_QGRPS_PER_WORD == 2) + idx = (reg2 >> ((qg % ACC_NUM_QGRPS_PER_WORD) * 4)) & 0x7; + else + idx = (reg3 >> ((qg % ACC_NUM_QGRPS_PER_WORD) * 4)) & 0x7; + if (idx < VRB_NUM_ACCS) { + acc = qman_func_id[idx]; + updateQtop(acc, qg, acc_conf, d); + } } } - } - /* Check the depth of the AQs. 
*/ - reg_len0 = acc_reg_read(d, d->reg_addr->depth_log0_offset); - reg_len1 = acc_reg_read(d, d->reg_addr->depth_log1_offset); - for (acc = 0; acc < NUM_ACC; acc++) { - qtopFromAcc(&q_top, acc, acc_conf); - if (q_top->first_qgroup_index < ACC_NUM_QGRPS_PER_WORD) - q_top->aq_depth_log2 = (reg_len0 >> (q_top->first_qgroup_index * 4)) & 0xF; - else - q_top->aq_depth_log2 = (reg_len1 >> ((q_top->first_qgroup_index - - ACC_NUM_QGRPS_PER_WORD) * 4)) & 0xF; + /* Check the depth of the AQs. */ + reg_len0 = acc_reg_read(d, d->reg_addr->depth_log0_offset); + reg_len1 = acc_reg_read(d, d->reg_addr->depth_log0_offset + 4); + reg_len2 = acc_reg_read(d, d->reg_addr->depth_log0_offset + 8); + reg_len3 = acc_reg_read(d, d->reg_addr->depth_log0_offset + 12); + + for (acc = 0; acc < NUM_ACC; acc++) { + qtopFromAcc(&q_top, acc, acc_conf); + if (q_top->first_qgroup_index / ACC_NUM_QGRPS_PER_WORD == 0) + q_top->aq_depth_log2 = (reg_len0 >> ((q_top->first_qgroup_index % + ACC_NUM_QGRPS_PER_WORD) * 4)) & 0xF; + else if (q_top->first_qgroup_index / ACC_NUM_QGRPS_PER_WORD == 1) + q_top->aq_depth_log2 = (reg_len1 >> ((q_top->first_qgroup_index % + ACC_NUM_QGRPS_PER_WORD) * 4)) & 0xF; + else if (q_top->first_qgroup_index / ACC_NUM_QGRPS_PER_WORD == 2) + q_top->aq_depth_log2 = (reg_len2 >> ((q_top->first_qgroup_index % + ACC_NUM_QGRPS_PER_WORD) * 4)) & 0xF; + else + q_top->aq_depth_log2 = (reg_len3 >> ((q_top->first_qgroup_index % + ACC_NUM_QGRPS_PER_WORD) * 4)) & 0xF; + } } /* Read PF mode. */ @@ -250,7 +348,7 @@ fetch_acc_config(struct rte_bbdev *dev) } rte_bbdev_log_debug( - "%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u %u AQ %u %u %u %u %u Len %u %u %u %u %u\n", + "%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u %u %u AQ %u %u %u %u %u %u Len %u %u %u %u %u %u\n", (d->pf_device) ? "PF" : "VF", (acc_conf->input_pos_llr_1_bit) ? "POS" : "NEG", (acc_conf->output_pos_llr_1_bit) ? "POS" : "NEG", @@ -259,22 +357,19 @@ fetch_acc_config(struct rte_bbdev *dev) acc_conf->q_ul_5g.num_qgroups, acc_conf->q_dl_5g.num_qgroups, acc_conf->q_fft.num_qgroups, + acc_conf->q_mld.num_qgroups, acc_conf->q_ul_4g.num_aqs_per_groups, acc_conf->q_dl_4g.num_aqs_per_groups, acc_conf->q_ul_5g.num_aqs_per_groups, acc_conf->q_dl_5g.num_aqs_per_groups, acc_conf->q_fft.num_aqs_per_groups, + acc_conf->q_mld.num_aqs_per_groups, acc_conf->q_ul_4g.aq_depth_log2, acc_conf->q_dl_4g.aq_depth_log2, acc_conf->q_ul_5g.aq_depth_log2, acc_conf->q_dl_5g.aq_depth_log2, - acc_conf->q_fft.aq_depth_log2); -} - -static inline void -vrb_vf2pf(struct acc_device *d, unsigned int payload) -{ - acc_reg_write(d, d->reg_addr->vf2pf_doorbell, payload); + acc_conf->q_fft.aq_depth_log2, + acc_conf->q_mld.aq_depth_log2); } /* Request device status information. 
*/ @@ -303,17 +398,18 @@ static inline void vrb_check_ir(struct acc_device *acc_dev) { volatile union acc_info_ring_data *ring_data; - uint16_t info_ring_head = acc_dev->info_ring_head; + uint16_t info_ring_head = acc_dev->info_ring_head, int_nb; if (unlikely(acc_dev->info_ring == NULL)) return; ring_data = acc_dev->info_ring + (acc_dev->info_ring_head & ACC_INFO_RING_MASK); while (ring_data->valid) { - if ((ring_data->int_nb < ACC_PF_INT_DMA_DL_DESC_IRQ) || ( - ring_data->int_nb > ACC_PF_INT_DMA_DL5G_DESC_IRQ)) { + int_nb = int_from_ring(*ring_data, acc_dev->device_variant); + if ((int_nb < ACC_PF_INT_DMA_DL_DESC_IRQ) || ( + int_nb > ACC_PF_INT_DMA_MLD_DESC_IRQ)) { rte_bbdev_log(WARNING, "InfoRing: ITR:%d Info:0x%x", - ring_data->int_nb, ring_data->detailed_info); + int_nb, ring_data->detailed_info); /* Initialize Info Ring entry and move forward. */ ring_data->val = 0; } @@ -330,29 +426,33 @@ vrb_dev_interrupt_handler(void *cb_arg) struct acc_device *acc_dev = dev->data->dev_private; volatile union acc_info_ring_data *ring_data; struct acc_deq_intr_details deq_intr_det; + uint16_t vf_id, aq_id, qg_id, int_nb; ring_data = acc_dev->info_ring + (acc_dev->info_ring_head & ACC_INFO_RING_MASK); while (ring_data->valid) { + vf_id = vf_from_ring(*ring_data, acc_dev->device_variant); + aq_id = aq_from_ring(*ring_data, acc_dev->device_variant); + qg_id = qg_from_ring(*ring_data, acc_dev->device_variant); + int_nb = int_from_ring(*ring_data, acc_dev->device_variant); if (acc_dev->pf_device) { rte_bbdev_log_debug( - "VRB1 PF Interrupt received, Info Ring data: 0x%x -> %d", - ring_data->val, ring_data->int_nb); + "PF Interrupt received, Info Ring data: 0x%x -> %d", + ring_data->val, int_nb); - switch (ring_data->int_nb) { + switch (int_nb) { case ACC_PF_INT_DMA_DL_DESC_IRQ: case ACC_PF_INT_DMA_UL_DESC_IRQ: case ACC_PF_INT_DMA_FFT_DESC_IRQ: case ACC_PF_INT_DMA_UL5G_DESC_IRQ: case ACC_PF_INT_DMA_DL5G_DESC_IRQ: + case ACC_PF_INT_DMA_MLD_DESC_IRQ: deq_intr_det.queue_id = get_queue_id_from_ring_info( dev->data, *ring_data); if (deq_intr_det.queue_id == UINT16_MAX) { rte_bbdev_log(ERR, "Couldn't find queue: aq_id: %u, qg_id: %u, vf_id: %u", - ring_data->aq_id, - ring_data->qg_id, - ring_data->vf_id); + aq_id, qg_id, vf_id); return; } rte_bbdev_pmd_callback_process(dev, @@ -364,23 +464,23 @@ vrb_dev_interrupt_handler(void *cb_arg) } } else { rte_bbdev_log_debug( - "VRB1 VF Interrupt received, Info Ring data: 0x%x\n", + "VRB VF Interrupt received, Info Ring data: 0x%x\n", ring_data->val); - switch (ring_data->int_nb) { + switch (int_nb) { case ACC_VF_INT_DMA_DL_DESC_IRQ: case ACC_VF_INT_DMA_UL_DESC_IRQ: case ACC_VF_INT_DMA_FFT_DESC_IRQ: case ACC_VF_INT_DMA_UL5G_DESC_IRQ: case ACC_VF_INT_DMA_DL5G_DESC_IRQ: + case ACC_VF_INT_DMA_MLD_DESC_IRQ: /* VFs are not aware of their vf_id - it's set to 0. */ - ring_data->vf_id = 0; + set_vf_in_ring(ring_data, acc_dev->device_variant, 0); deq_intr_det.queue_id = get_queue_id_from_ring_info( dev->data, *ring_data); if (deq_intr_det.queue_id == UINT16_MAX) { rte_bbdev_log(ERR, "Couldn't find queue: aq_id: %u, qg_id: %u", - ring_data->aq_id, - ring_data->qg_id); + aq_id, qg_id); return; } rte_bbdev_pmd_callback_process(dev, @@ -395,8 +495,7 @@ vrb_dev_interrupt_handler(void *cb_arg) /* Initialize Info Ring entry and move forward. 
*/ ring_data->val = 0; ++acc_dev->info_ring_head; - ring_data = acc_dev->info_ring + - (acc_dev->info_ring_head & ACC_INFO_RING_MASK); + ring_data = acc_dev->info_ring + (acc_dev->info_ring_head & ACC_INFO_RING_MASK); } } @@ -428,7 +527,10 @@ allocate_info_ring(struct rte_bbdev *dev) phys_low = (uint32_t)(info_ring_iova); acc_reg_write(d, d->reg_addr->info_ring_hi, phys_high); acc_reg_write(d, d->reg_addr->info_ring_lo, phys_low); - acc_reg_write(d, d->reg_addr->info_ring_en, VRB1_REG_IRQ_EN_ALL); + if (d->device_variant == VRB1_VARIANT) + acc_reg_write(d, d->reg_addr->info_ring_en, VRB1_REG_IRQ_EN_ALL); + else + acc_reg_write(d, d->reg_addr->info_ring_en, VRB2_REG_IRQ_EN_ALL); d->info_ring_head = (acc_reg_read(d, d->reg_addr->info_ring_ptr) & 0xFFF) / sizeof(union acc_info_ring_data); return 0; @@ -507,6 +609,10 @@ vrb_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id) acc_reg_write(d, d->reg_addr->dma_ring_dl4g_lo, phys_low); acc_reg_write(d, d->reg_addr->dma_ring_fft_hi, phys_high); acc_reg_write(d, d->reg_addr->dma_ring_fft_lo, phys_low); + if (d->device_variant == VRB2_VARIANT) { + acc_reg_write(d, d->reg_addr->dma_ring_mld_hi, phys_high); + acc_reg_write(d, d->reg_addr->dma_ring_mld_lo, phys_low); + } /* * Configure Ring Size to the max queue ring size * (used for wrapping purpose). @@ -516,8 +622,7 @@ vrb_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id) /* Configure tail pointer for use when SDONE enabled. */ if (d->tail_ptrs == NULL) - d->tail_ptrs = rte_zmalloc_socket( - dev->device->driver->name, + d->tail_ptrs = rte_zmalloc_socket(dev->device->driver->name, VRB_MAX_QGRPS * VRB_MAX_AQS * sizeof(uint32_t), RTE_CACHE_LINE_SIZE, socket_id); if (d->tail_ptrs == NULL) { @@ -541,6 +646,10 @@ vrb_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id) acc_reg_write(d, d->reg_addr->tail_ptrs_dl4g_lo, phys_low); acc_reg_write(d, d->reg_addr->tail_ptrs_fft_hi, phys_high); acc_reg_write(d, d->reg_addr->tail_ptrs_fft_lo, phys_low); + if (d->device_variant == VRB2_VARIANT) { + acc_reg_write(d, d->reg_addr->tail_ptrs_mld_hi, phys_high); + acc_reg_write(d, d->reg_addr->tail_ptrs_mld_lo, phys_low); + } ret = allocate_info_ring(dev); if (ret < 0) { @@ -638,10 +747,17 @@ vrb_intr_enable(struct rte_bbdev *dev) return ret; } - if (acc_dev->pf_device) - max_queues = VRB1_MAX_PF_MSIX; - else - max_queues = VRB1_MAX_VF_MSIX; + if (d->device_variant == VRB1_VARIANT) { + if (acc_dev->pf_device) + max_queues = VRB1_MAX_PF_MSIX; + else + max_queues = VRB1_MAX_VF_MSIX; + } else { + if (acc_dev->pf_device) + max_queues = VRB2_MAX_PF_MSIX; + else + max_queues = VRB2_MAX_VF_MSIX; + } if (rte_intr_efd_enable(dev->intr_handle, max_queues)) { rte_bbdev_log(ERR, "Failed to create fds for %u queues", @@ -720,7 +836,7 @@ vrb_find_free_queue_idx(struct rte_bbdev *dev, const struct rte_bbdev_queue_conf *conf) { struct acc_device *d = dev->data->dev_private; - int op_2_acc[6] = {0, UL_4G, DL_4G, UL_5G, DL_5G, FFT}; + int op_2_acc[7] = {0, UL_4G, DL_4G, UL_5G, DL_5G, FFT, MLD}; int acc = op_2_acc[conf->op_type]; struct rte_acc_queue_topology *qtop = NULL; uint16_t group_idx; @@ -743,7 +859,7 @@ vrb_find_free_queue_idx(struct rte_bbdev *dev, /* Mark the Queue as assigned. */ d->q_assigned_bit_map[group_idx] |= (1ULL << aq_idx); /* Report the AQ Index. 
*/ - return (group_idx << VRB1_GRP_ID_SHIFT) + aq_idx; + return queue_index(group_idx, aq_idx, d->device_variant); } } rte_bbdev_log(INFO, "Failed to find free queue on %s, priority %u", @@ -783,7 +899,11 @@ vrb_queue_setup(struct rte_bbdev *dev, uint16_t queue_id, int fcw_len = (conf->op_type == RTE_BBDEV_OP_LDPC_ENC ? ACC_FCW_LE_BLEN : (conf->op_type == RTE_BBDEV_OP_TURBO_DEC ? ACC_FCW_TD_BLEN : (conf->op_type == RTE_BBDEV_OP_LDPC_DEC ? - ACC_FCW_LD_BLEN : ACC_FCW_FFT_BLEN))); + ACC_FCW_LD_BLEN : (conf->op_type == RTE_BBDEV_OP_FFT ? + ACC_FCW_FFT_BLEN : ACC_FCW_MLDTS_BLEN)))); + + if ((q->d->device_variant == VRB2_VARIANT) && (conf->op_type == RTE_BBDEV_OP_FFT)) + fcw_len = ACC_FCW_FFT_BLEN_3; for (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) { desc = q->ring_addr + desc_idx; @@ -862,9 +982,29 @@ vrb_queue_setup(struct rte_bbdev *dev, uint16_t queue_id, goto free_companion_ring_addr; } - q->qgrp_id = (q_idx >> VRB1_GRP_ID_SHIFT) & 0xF; - q->vf_id = (q_idx >> VRB1_VF_ID_SHIFT) & 0x3F; - q->aq_id = q_idx & 0xF; + q->fcw_ring = rte_zmalloc_socket(dev->device->driver->name, + ACC_MAX_FCW_SIZE * d->sw_ring_max_depth, + RTE_CACHE_LINE_SIZE, conf->socket); + if (q->fcw_ring == NULL) { + rte_bbdev_log(ERR, "Failed to allocate fcw_ring memory"); + ret = -ENOMEM; + goto free_companion_ring_addr; + } + q->fcw_ring_addr_iova = rte_malloc_virt2iova(q->fcw_ring); + + /* For FFT we need to store the FCW separately */ + if (conf->op_type == RTE_BBDEV_OP_FFT) { + for (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) { + desc = q->ring_addr + desc_idx; + desc->req.data_ptrs[0].address = q->fcw_ring_addr_iova + + desc_idx * ACC_MAX_FCW_SIZE; + } + } + + q->qgrp_id = qg_from_q(q_idx, d->device_variant); + q->vf_id = vf_from_q(q_idx, d->device_variant); + q->aq_id = aq_from_q(q_idx, d->device_variant); + q->aq_depth = 0; if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC) q->aq_depth = (1 << d->acc_conf.q_ul_4g.aq_depth_log2); @@ -876,6 +1016,8 @@ vrb_queue_setup(struct rte_bbdev *dev, uint16_t queue_id, q->aq_depth = (1 << d->acc_conf.q_dl_5g.aq_depth_log2); else if (conf->op_type == RTE_BBDEV_OP_FFT) q->aq_depth = (1 << d->acc_conf.q_fft.aq_depth_log2); + else if (conf->op_type == RTE_BBDEV_OP_MLDTS) + q->aq_depth = (1 << d->acc_conf.q_mld.aq_depth_log2); q->mmio_reg_enqueue = RTE_PTR_ADD(d->mmio_base, d->queue_offset(d->pf_device, q->vf_id, q->qgrp_id, q->aq_id)); @@ -932,6 +1074,13 @@ vrb_print_op(struct rte_bbdev_dec_op *op, enum rte_bbdev_op_type op_type, op_dl->ldpc_enc.n_filler, op_dl->ldpc_enc.cb_params.e, op_dl->ldpc_enc.op_flags, op_dl->ldpc_enc.rv_index ); + } else if (op_type == RTE_BBDEV_OP_MLDTS) { + struct rte_bbdev_mldts_op *op_mldts = (struct rte_bbdev_mldts_op *) op; + rte_bbdev_log(INFO, " Op MLD %d RBs %d NL %d Rp %d %d %x\n", + index, + op_mldts->mldts.num_rbs, op_mldts->mldts.num_layers, + op_mldts->mldts.r_rep, + op_mldts->mldts.c_rep, op_mldts->mldts.op_flags); } } @@ -973,6 +1122,7 @@ vrb_queue_release(struct rte_bbdev *dev, uint16_t q_id) if (q != NULL) { /* Mark the Queue as un-assigned. 
*/ d->q_assigned_bit_map[q->qgrp_id] &= (~0ULL - (1 << (uint64_t) q->aq_id)); + rte_free(q->fcw_ring); rte_free(q->companion_ring_addr); rte_free(q->lb_in); rte_free(q->lb_out); @@ -997,15 +1147,11 @@ vrb_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info) RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE | RTE_BBDEV_TURBO_CRC_TYPE_24B | RTE_BBDEV_TURBO_DEC_CRC_24B_DROP | - RTE_BBDEV_TURBO_EQUALIZER | - RTE_BBDEV_TURBO_SOFT_OUT_SATURATE | RTE_BBDEV_TURBO_HALF_ITERATION_EVEN | RTE_BBDEV_TURBO_CONTINUE_CRC_MATCH | - RTE_BBDEV_TURBO_SOFT_OUTPUT | RTE_BBDEV_TURBO_EARLY_TERMINATION | RTE_BBDEV_TURBO_DEC_INTERRUPTS | RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN | - RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT | RTE_BBDEV_TURBO_MAP_DEC | RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP | RTE_BBDEV_TURBO_DEC_SCATTER_GATHER, @@ -1081,12 +1227,131 @@ vrb_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info) RTE_BBDEV_FFT_DFT_BYPASS | RTE_BBDEV_FFT_IDFT_BYPASS | RTE_BBDEV_FFT_WINDOWING_BYPASS, + .num_buffers_src = 1, + .num_buffers_dst = 1, + .fft_windows_num = ACC_MAX_FFT_WIN, + } + }, + RTE_BBDEV_END_OF_CAPABILITIES_LIST() + }; + + static const struct rte_bbdev_op_cap vrb2_bbdev_capabilities[] = { + { + .type = RTE_BBDEV_OP_TURBO_DEC, + .cap.turbo_dec = { + .capability_flags = + RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE | + RTE_BBDEV_TURBO_CRC_TYPE_24B | + RTE_BBDEV_TURBO_DEC_CRC_24B_DROP | + RTE_BBDEV_TURBO_EQUALIZER | + RTE_BBDEV_TURBO_SOFT_OUT_SATURATE | + RTE_BBDEV_TURBO_HALF_ITERATION_EVEN | + RTE_BBDEV_TURBO_CONTINUE_CRC_MATCH | + RTE_BBDEV_TURBO_SOFT_OUTPUT | + RTE_BBDEV_TURBO_EARLY_TERMINATION | + RTE_BBDEV_TURBO_DEC_INTERRUPTS | + RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN | + RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT | + RTE_BBDEV_TURBO_MAP_DEC | + RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP | + RTE_BBDEV_TURBO_DEC_SCATTER_GATHER, + .max_llr_modulus = INT8_MAX, + .num_buffers_src = + RTE_BBDEV_TURBO_MAX_CODE_BLOCKS, + .num_buffers_hard_out = + RTE_BBDEV_TURBO_MAX_CODE_BLOCKS, + .num_buffers_soft_out = + RTE_BBDEV_TURBO_MAX_CODE_BLOCKS, + } + }, + { + .type = RTE_BBDEV_OP_TURBO_ENC, + .cap.turbo_enc = { + .capability_flags = + RTE_BBDEV_TURBO_CRC_24B_ATTACH | + RTE_BBDEV_TURBO_RV_INDEX_BYPASS | + RTE_BBDEV_TURBO_RATE_MATCH | + RTE_BBDEV_TURBO_ENC_INTERRUPTS | + RTE_BBDEV_TURBO_ENC_SCATTER_GATHER, + .num_buffers_src = + RTE_BBDEV_TURBO_MAX_CODE_BLOCKS, + .num_buffers_dst = + RTE_BBDEV_TURBO_MAX_CODE_BLOCKS, + } + }, + { + .type = RTE_BBDEV_OP_LDPC_ENC, + .cap.ldpc_enc = { + .capability_flags = + RTE_BBDEV_LDPC_RATE_MATCH | + RTE_BBDEV_LDPC_CRC_24B_ATTACH | + RTE_BBDEV_LDPC_INTERLEAVER_BYPASS | + RTE_BBDEV_LDPC_ENC_INTERRUPTS | + RTE_BBDEV_LDPC_ENC_SCATTER_GATHER | + RTE_BBDEV_LDPC_ENC_CONCATENATION, .num_buffers_src = RTE_BBDEV_LDPC_MAX_CODE_BLOCKS, .num_buffers_dst = RTE_BBDEV_LDPC_MAX_CODE_BLOCKS, } }, + { + .type = RTE_BBDEV_OP_LDPC_DEC, + .cap.ldpc_dec = { + .capability_flags = + RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK | + RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP | + RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK | + RTE_BBDEV_LDPC_CRC_TYPE_16_CHECK | + RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE | + RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE | + RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE | + RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS | + RTE_BBDEV_LDPC_DEC_SCATTER_GATHER | + RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION | + RTE_BBDEV_LDPC_HARQ_4BIT_COMPRESSION | + RTE_BBDEV_LDPC_LLR_COMPRESSION | + RTE_BBDEV_LDPC_SOFT_OUT_ENABLE | + RTE_BBDEV_LDPC_SOFT_OUT_RM_BYPASS | + RTE_BBDEV_LDPC_SOFT_OUT_DEINTERLEAVER_BYPASS | + RTE_BBDEV_LDPC_DEC_INTERRUPTS, + .llr_size = 8, + 
.llr_decimals = 2, + .num_buffers_src = + RTE_BBDEV_LDPC_MAX_CODE_BLOCKS, + .num_buffers_hard_out = + RTE_BBDEV_LDPC_MAX_CODE_BLOCKS, + .num_buffers_soft_out = 0, + } + }, + { + .type = RTE_BBDEV_OP_FFT, + .cap.fft = { + .capability_flags = + RTE_BBDEV_FFT_WINDOWING | + RTE_BBDEV_FFT_CS_ADJUSTMENT | + RTE_BBDEV_FFT_DFT_BYPASS | + RTE_BBDEV_FFT_IDFT_BYPASS | + RTE_BBDEV_FFT_FP16_INPUT | + RTE_BBDEV_FFT_FP16_OUTPUT | + RTE_BBDEV_FFT_POWER_MEAS | + RTE_BBDEV_FFT_WINDOWING_BYPASS, + .num_buffers_src = 1, + .num_buffers_dst = 1, + .fft_windows_num = ACC_MAX_FFT_WIN, + } + }, + { + .type = RTE_BBDEV_OP_MLDTS, + .cap.mld = { + .capability_flags = + RTE_BBDEV_MLDTS_REP, + .num_buffers_src = + 1, + .num_buffers_dst = + 1, + } + }, RTE_BBDEV_END_OF_CAPABILITIES_LIST() }; @@ -1100,6 +1365,7 @@ vrb_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info) fetch_acc_config(dev); /* Check the status of device. */ dev_info->device_status = vrb_device_status(dev); + dev_info->fft_window_width = d->fft_window_width; /* Exposed number of queues. */ dev_info->num_queues[RTE_BBDEV_OP_NONE] = 0; @@ -1113,13 +1379,16 @@ vrb_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info) d->acc_conf.q_dl_5g.num_qgroups; dev_info->num_queues[RTE_BBDEV_OP_FFT] = d->acc_conf.q_fft.num_aqs_per_groups * d->acc_conf.q_fft.num_qgroups; + dev_info->num_queues[RTE_BBDEV_OP_MLDTS] = d->acc_conf.q_mld.num_aqs_per_groups * + d->acc_conf.q_mld.num_qgroups; dev_info->queue_priority[RTE_BBDEV_OP_TURBO_DEC] = d->acc_conf.q_ul_4g.num_qgroups; dev_info->queue_priority[RTE_BBDEV_OP_TURBO_ENC] = d->acc_conf.q_dl_4g.num_qgroups; dev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = d->acc_conf.q_ul_5g.num_qgroups; dev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = d->acc_conf.q_dl_5g.num_qgroups; dev_info->queue_priority[RTE_BBDEV_OP_FFT] = d->acc_conf.q_fft.num_qgroups; + dev_info->queue_priority[RTE_BBDEV_OP_MLDTS] = d->acc_conf.q_mld.num_qgroups; dev_info->max_num_queues = 0; - for (i = RTE_BBDEV_OP_NONE; i <= RTE_BBDEV_OP_FFT; i++) + for (i = RTE_BBDEV_OP_NONE; i <= RTE_BBDEV_OP_MLDTS; i++) dev_info->max_num_queues += dev_info->num_queues[i]; dev_info->queue_size_lim = ACC_MAX_QUEUE_DEPTH; dev_info->hardware_accelerated = true; @@ -1130,7 +1399,10 @@ vrb_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info) dev_info->default_queue_conf = default_queue_conf; dev_info->cpu_flag_reqs = NULL; dev_info->min_alignment = 1; - dev_info->capabilities = vrb1_bbdev_capabilities; + if (d->device_variant == VRB1_VARIANT) + dev_info->capabilities = vrb1_bbdev_capabilities; + else + dev_info->capabilities = vrb2_bbdev_capabilities; dev_info->harq_buffer_size = 0; vrb_check_ir(d); @@ -1179,6 +1451,9 @@ static struct rte_pci_id pci_id_vrb_pf_map[] = { { RTE_PCI_DEVICE(RTE_VRB1_VENDOR_ID, RTE_VRB1_PF_DEVICE_ID) }, + { + RTE_PCI_DEVICE(RTE_VRB2_VENDOR_ID, RTE_VRB2_PF_DEVICE_ID) + }, {.device_id = 0}, }; @@ -1187,6 +1462,9 @@ static struct rte_pci_id pci_id_vrb_vf_map[] = { { RTE_PCI_DEVICE(RTE_VRB1_VENDOR_ID, RTE_VRB1_VF_DEVICE_ID) }, + { + RTE_PCI_DEVICE(RTE_VRB2_VENDOR_ID, RTE_VRB2_VF_DEVICE_ID) + }, {.device_id = 0}, }; @@ -1223,6 +1501,7 @@ vrb_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc_fcw_td *fcw) fcw->ea = op->turbo_dec.cb_params.e; fcw->eb = op->turbo_dec.cb_params.e; } + if (op->turbo_dec.rv_index == 0) fcw->k0_start_col = ACC_FCW_TD_RVIDX_0; else if (op->turbo_dec.rv_index == 1) @@ -1241,7 +1520,7 @@ vrb_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc_fcw_td *fcw) 
fcw->bypass_teq = 0; } - fcw->code_block_mode = 1; /* FIXME */ + fcw->code_block_mode = 1; fcw->turbo_crc_type = check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_CRC_TYPE_24B); @@ -1258,8 +1537,8 @@ vrb_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc_fcw_td *fcw) /* Fill in a frame control word for LDPC decoding. */ static inline void -vrb1_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw, - union acc_harq_layout_data *harq_layout) +vrb_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw, + union acc_harq_layout_data *harq_layout, uint16_t device_variant) { uint16_t harq_out_length, harq_in_length, ncb_p, k0_p, parity_offset; uint32_t harq_index; @@ -1305,18 +1584,26 @@ vrb1_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw, fcw->bypass_intlv = 1; fcw->qm = 2; } - fcw->hcin_decomp_mode = check_bit(op->ldpc_dec.op_flags, - RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION); - fcw->hcout_comp_mode = check_bit(op->ldpc_dec.op_flags, - RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION); + if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION)) { + fcw->hcin_decomp_mode = 1; + fcw->hcout_comp_mode = 1; + } else if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HARQ_4BIT_COMPRESSION)) { + fcw->hcin_decomp_mode = 4; + fcw->hcout_comp_mode = 4; + } else { + fcw->hcin_decomp_mode = 0; + fcw->hcout_comp_mode = 0; + } + fcw->llr_pack_mode = check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_LLR_COMPRESSION); harq_index = hq_index(op->ldpc_dec.harq_combined_output.offset); - if (fcw->hcin_en > 0) { harq_in_length = op->ldpc_dec.harq_combined_input.length; - if (fcw->hcin_decomp_mode > 0) + if (fcw->hcin_decomp_mode == 1) harq_in_length = harq_in_length * 8 / 6; + else if (fcw->hcin_decomp_mode == 4) + harq_in_length = harq_in_length * 2; harq_in_length = RTE_MIN(harq_in_length, op->ldpc_dec.n_cb - op->ldpc_dec.n_filler); harq_in_length = RTE_ALIGN_CEIL(harq_in_length, 64); @@ -1330,20 +1617,27 @@ vrb1_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw, } fcw->itmax = op->ldpc_dec.iter_max; - fcw->itstop = check_bit(op->ldpc_dec.op_flags, - RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE); + fcw->itstop = check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE); fcw->cnu_algo = ACC_ALGO_MSA; fcw->synd_precoder = fcw->itstop; + + if (device_variant != VRB1_VARIANT) { + fcw->so_it = op->ldpc_dec.iter_max; + fcw->so_en = check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_SOFT_OUT_ENABLE); + fcw->so_bypass_intlv = check_bit(op->ldpc_dec.op_flags, + RTE_BBDEV_LDPC_SOFT_OUT_DEINTERLEAVER_BYPASS); + fcw->so_bypass_rm = check_bit(op->ldpc_dec.op_flags, + RTE_BBDEV_LDPC_SOFT_OUT_RM_BYPASS); + fcw->minsum_offset = 1; + fcw->dec_llrclip = 2; + } + /* - * These are all implicitly set: + * These are all implicitly set * fcw->synd_post = 0; - * fcw->so_en = 0; - * fcw->so_bypass_rm = 0; - * fcw->so_bypass_intlv = 0; * fcw->dec_convllr = 0; * fcw->hcout_convllr = 0; * fcw->hcout_size1 = 0; - * fcw->so_it = 0; * fcw->hcout_offset = 0; * fcw->negstop_th = 0; * fcw->negstop_it = 0; @@ -1354,7 +1648,8 @@ vrb1_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw, if (fcw->hcout_en > 0) { parity_offset = (op->ldpc_dec.basegraph == 1 ? 20 : 8) * op->ldpc_dec.z_c - op->ldpc_dec.n_filler; - k0_p = (fcw->k0 > parity_offset) ? fcw->k0 - op->ldpc_dec.n_filler : fcw->k0; + k0_p = (fcw->k0 > parity_offset) ? 
+ fcw->k0 - op->ldpc_dec.n_filler : fcw->k0; ncb_p = fcw->ncb - op->ldpc_dec.n_filler; l = k0_p + fcw->rm_e; harq_out_length = (uint16_t) fcw->hcin_size0; @@ -1401,8 +1696,8 @@ vrb_dma_desc_td_fill(struct rte_bbdev_dec_op *op, if (op->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) { k = op->turbo_dec.tb_params.k_pos; e = (r < op->turbo_dec.tb_params.cab) - ? op->turbo_dec.tb_params.ea - : op->turbo_dec.tb_params.eb; + ? op->turbo_dec.tb_params.ea + : op->turbo_dec.tb_params.eb; } else { k = op->turbo_dec.cb_params.k; e = op->turbo_dec.cb_params.e; @@ -1497,19 +1792,28 @@ vrb_dma_desc_td_fill(struct rte_bbdev_dec_op *op, } static inline int -vrb1_dma_desc_ld_fill(struct rte_bbdev_dec_op *op, +vrb_dma_desc_ld_fill(struct rte_bbdev_dec_op *op, struct acc_dma_req_desc *desc, struct rte_mbuf **input, struct rte_mbuf *h_output, uint32_t *in_offset, uint32_t *h_out_offset, uint32_t *h_out_length, uint32_t *mbuf_total_left, - uint32_t *seg_total_left, struct acc_fcw_ld *fcw) + uint32_t *seg_total_left, struct acc_fcw_ld *fcw, uint16_t device_variant) { struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec; int next_triplet = 1; /* FCW already done. */ uint32_t input_length; uint16_t output_length, crc24_overlap = 0; uint16_t sys_cols, K, h_p_size, h_np_size; - bool h_comp = check_bit(dec->op_flags, RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION); + + if (device_variant == VRB1_VARIANT) { + if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HARQ_4BIT_COMPRESSION) || + check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_SOFT_OUT_ENABLE)) { + rte_bbdev_log(ERR, + "VRB1 does not support the requested capabilities %x", + op->ldpc_dec.op_flags); + return -1; + } + } acc_header_init(desc); @@ -1550,8 +1854,10 @@ vrb1_dma_desc_ld_fill(struct rte_bbdev_dec_op *op, return -1; } h_p_size = fcw->hcin_size0 + fcw->hcin_size1; - if (h_comp) + if (fcw->hcin_decomp_mode == 1) h_p_size = (h_p_size * 3 + 3) / 4; + else if (fcw->hcin_decomp_mode == 4) + h_p_size = h_p_size / 2; if (op->ldpc_dec.harq_combined_input.data == 0) { rte_bbdev_log(ERR, "HARQ input is not defined"); return -1; @@ -1574,21 +1880,36 @@ vrb1_dma_desc_ld_fill(struct rte_bbdev_dec_op *op, *h_out_offset, output_length >> 3, next_triplet, ACC_DMA_BLKID_OUT_HARD); - if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) { + if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_SOFT_OUT_ENABLE)) { + if (op->ldpc_dec.soft_output.data == 0) { + rte_bbdev_log(ERR, "Soft output is not defined"); + return -1; + } + dec->soft_output.length = fcw->rm_e; + acc_dma_fill_blk_type(desc, dec->soft_output.data, dec->soft_output.offset, + fcw->rm_e, next_triplet, ACC_DMA_BLKID_OUT_SOFT); + next_triplet++; + } + + if (check_bit(op->ldpc_dec.op_flags, + RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) { if (op->ldpc_dec.harq_combined_output.data == 0) { rte_bbdev_log(ERR, "HARQ output is not defined"); return -1; } - /* Pruned size of the HARQ. */ + /* Pruned size of the HARQ */ h_p_size = fcw->hcout_size0 + fcw->hcout_size1; - /* Non-Pruned size of the HARQ. */ + /* Non-Pruned size of the HARQ */ h_np_size = fcw->hcout_offset > 0 ? 
fcw->hcout_offset + fcw->hcout_size1 : h_p_size; - if (h_comp) { + if (fcw->hcin_decomp_mode == 1) { h_np_size = (h_np_size * 3 + 3) / 4; h_p_size = (h_p_size * 3 + 3) / 4; + } else if (fcw->hcin_decomp_mode == 4) { + h_np_size = h_np_size / 2; + h_p_size = h_p_size / 2; } dec->harq_combined_output.length = h_np_size; acc_dma_fill_blk_type( @@ -1656,7 +1977,7 @@ vrb_dma_desc_ld_update(struct rte_bbdev_dec_op *op, desc->op_addr = op; } -/* Enqueue one encode operations for device in CB mode */ +/* Enqueue one encode operations for device in CB mode. */ static inline int enqueue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op *op, uint16_t total_enqueued_cbs) @@ -1716,6 +2037,7 @@ enqueue_ldpc_enc_n_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ops, /** This could be done at polling. */ acc_header_init(&desc->req); desc->req.numCBs = num; + desc->req.dltb = 0; in_length_in_bytes = ops[0]->ldpc_enc.input.data->data_len; out_length = (enc->cb_params.e + 7) >> 3; @@ -1754,7 +2076,7 @@ enqueue_ldpc_enc_n_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ops, return num; } -/* Enqueue one encode operations for device for a partial TB +/* Enqueue one encode operations for VRB1 device for a partial TB * all codes blocks have same configuration multiplexed on the same descriptor. */ static inline void @@ -1941,6 +2263,104 @@ vrb1_enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op return return_descs; } +/* Fill in a frame control word for LDPC encoding. */ +static inline void +vrb2_fcw_letb_fill(const struct rte_bbdev_enc_op *op, struct acc_fcw_le *fcw) +{ + fcw->qm = op->ldpc_enc.q_m; + fcw->nfiller = op->ldpc_enc.n_filler; + fcw->BG = (op->ldpc_enc.basegraph - 1); + fcw->Zc = op->ldpc_enc.z_c; + fcw->ncb = op->ldpc_enc.n_cb; + fcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_enc.basegraph, + op->ldpc_enc.rv_index); + fcw->rm_e = op->ldpc_enc.tb_params.ea; + fcw->rm_e_b = op->ldpc_enc.tb_params.eb; + fcw->crc_select = check_bit(op->ldpc_enc.op_flags, + RTE_BBDEV_LDPC_CRC_24B_ATTACH); + fcw->bypass_intlv = 0; + if (op->ldpc_enc.tb_params.c > 1) { + fcw->mcb_count = 0; + fcw->C = op->ldpc_enc.tb_params.c; + fcw->Cab = op->ldpc_enc.tb_params.cab; + } else { + fcw->mcb_count = 1; + fcw->C = 0; + } +} + +/* Enqueue one encode operations for device in TB mode. + * returns the number of descs used. + */ +static inline int +vrb2_enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op, + uint16_t enq_descs) +{ + union acc_dma_desc *desc = NULL; + uint32_t in_offset, out_offset, out_length, seg_total_left; + struct rte_mbuf *input, *output_head, *output; + struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc; + int next_triplet = 1; /* FCW already done. */ + uint32_t in_length_in_bytes; + uint16_t K, in_length_in_bits; + + desc = acc_desc(q, enq_descs); + vrb2_fcw_letb_fill(op, &desc->req.fcw_le); + + input = enc->input.data; + output_head = output = enc->output.data; + in_offset = enc->input.offset; + out_offset = enc->output.offset; + seg_total_left = rte_pktmbuf_data_len(enc->input.data) - in_offset; + + acc_header_init(&desc->req); + K = (enc->basegraph == 1 ? 
22 : 10) * enc->z_c; + in_length_in_bits = K - enc->n_filler; + if ((enc->op_flags & RTE_BBDEV_LDPC_CRC_24A_ATTACH) || + (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH)) + in_length_in_bits -= 24; + in_length_in_bytes = (in_length_in_bits >> 3) * enc->tb_params.c; + + next_triplet = acc_dma_fill_blk_type_in(&desc->req, &input, &in_offset, + in_length_in_bytes, &seg_total_left, next_triplet, + check_bit(enc->op_flags, RTE_BBDEV_LDPC_ENC_SCATTER_GATHER)); + if (unlikely(next_triplet < 0)) { + rte_bbdev_log(ERR, + "Mismatch between data to process and mbuf data length in bbdev_op: %p", + op); + return -1; + } + desc->req.data_ptrs[next_triplet - 1].last = 1; + desc->req.m2dlen = next_triplet; + + /* Set output length */ + /* Integer round up division by 8 */ + out_length = (enc->tb_params.ea * enc->tb_params.cab + + enc->tb_params.eb * (enc->tb_params.c - enc->tb_params.cab) + 7) >> 3; + + next_triplet = acc_dma_fill_blk_type(&desc->req, output, out_offset, + out_length, next_triplet, ACC_DMA_BLKID_OUT_ENC); + enc->output.length = out_length; + out_offset += out_length; + desc->req.data_ptrs[next_triplet - 1].last = 1; + desc->req.data_ptrs[next_triplet - 1].dma_ext = 0; + desc->req.d2mlen = next_triplet - desc->req.m2dlen; + desc->req.numCBs = enc->tb_params.c; + if (desc->req.numCBs > 1) + desc->req.dltb = 1; + desc->req.op_addr = op; + + if (out_length < ACC_MAX_E_MBUF) + mbuf_append(output_head, output, out_length); + +#ifdef RTE_LIBRTE_BBDEV_DEBUG + rte_memdump(stderr, "FCW", &desc->req.fcw_le, sizeof(desc->req.fcw_le)); + rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc)); +#endif + /* One CB (one op) was successfully prepared to enqueue */ + return 1; +} + /** Enqueue one decode operations for device in CB mode. */ static inline int enqueue_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op, @@ -1953,6 +2373,12 @@ enqueue_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op, struct rte_mbuf *input, *h_output_head, *h_output, *s_output_head, *s_output; + if ((q->d->device_variant == VRB1_VARIANT) && + (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT))) { + /* SO not supported for VRB1. */ + return -EPERM; + } + desc = acc_desc(q, total_enqueued_cbs); vrb_fcw_td_fill(op, &desc->req.fcw_td); @@ -1994,7 +2420,7 @@ enqueue_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op, return 1; } -/** Enqueue one decode operations for device in CB mode */ +/** Enqueue one decode operations for device in CB mode. */ static inline int vrb_enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op, uint16_t total_enqueued_cbs, bool same_op) @@ -2039,7 +2465,7 @@ vrb_enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw; uint32_t seg_total_left; fcw = &desc->req.fcw_ld; - q->d->fcw_ld_fill(op, fcw, harq_layout); + vrb_fcw_ld_fill(op, fcw, harq_layout, q->d->device_variant); /* Special handling when using mbuf or not. 
*/ if (check_bit(op->ldpc_dec.op_flags, @@ -2048,10 +2474,10 @@ vrb_enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op, else seg_total_left = fcw->rm_e; - ret = vrb1_dma_desc_ld_fill(op, &desc->req, &input, h_output, + ret = vrb_dma_desc_ld_fill(op, &desc->req, &input, h_output, &in_offset, &h_out_offset, &h_out_length, &mbuf_total_left, - &seg_total_left, fcw); + &seg_total_left, fcw, q->d->device_variant); if (unlikely(ret < 0)) return ret; } @@ -2108,7 +2534,8 @@ vrb_enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, desc_first = desc; fcw_offset = (desc_idx << 8) + ACC_DESC_FCW_OFFSET; harq_layout = q->d->harq_layout; - q->d->fcw_ld_fill(op, &desc->req.fcw_ld, harq_layout); + + vrb_fcw_ld_fill(op, &desc->req.fcw_ld, harq_layout, q->d->device_variant); input = op->ldpc_dec.input.data; h_output_head = h_output = op->ldpc_dec.hard_output.data; @@ -2140,12 +2567,11 @@ vrb_enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, desc->req.data_ptrs[0].blen = ACC_FCW_LD_BLEN; rte_memcpy(&desc->req.fcw_ld, &desc_first->req.fcw_ld, ACC_FCW_LD_BLEN); desc->req.fcw_ld.tb_trailer_size = (c - r - 1) * trail_len; - - ret = vrb1_dma_desc_ld_fill(op, &desc->req, &input, + ret = vrb_dma_desc_ld_fill(op, &desc->req, &input, h_output, &in_offset, &h_out_offset, &h_out_length, &mbuf_total_left, &seg_total_left, - &desc->req.fcw_ld); + &desc->req.fcw_ld, q->d->device_variant); if (unlikely(ret < 0)) return ret; @@ -2187,7 +2613,7 @@ vrb_enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, return current_enqueued_cbs; } -/* Enqueue one decode operations for device in TB mode */ +/* Enqueue one decode operations for device in TB mode. */ static inline int enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, uint16_t total_enqueued_cbs, uint8_t cbs_in_tb) @@ -2409,14 +2835,22 @@ vrb_enqueue_ldpc_enc_tb(struct rte_bbdev_queue_data *q_data, int descs_used; for (i = 0; i < num; ++i) { - cbs_in_tb = get_num_cbs_in_tb_ldpc_enc(&ops[i]->ldpc_enc); - /* Check if there are available space for further processing. */ - if (unlikely((avail - cbs_in_tb < 0) || (cbs_in_tb == 0))) { - acc_enqueue_ring_full(q_data); - break; + if (q->d->device_variant == VRB1_VARIANT) { + cbs_in_tb = get_num_cbs_in_tb_ldpc_enc(&ops[i]->ldpc_enc); + /* Check if there are available space for further processing. */ + if (unlikely((avail - cbs_in_tb < 0) || (cbs_in_tb == 0))) { + acc_enqueue_ring_full(q_data); + break; + } + descs_used = vrb1_enqueue_ldpc_enc_one_op_tb(q, ops[i], + enqueued_descs, cbs_in_tb); + } else { + if (unlikely(avail < 1)) { + acc_enqueue_ring_full(q_data); + break; + } + descs_used = vrb2_enqueue_ldpc_enc_one_op_tb(q, ops[i], enqueued_descs); } - - descs_used = vrb1_enqueue_ldpc_enc_one_op_tb(q, ops[i], enqueued_descs, cbs_in_tb); if (descs_used < 0) { acc_enqueue_invalid(q_data); break; @@ -2550,7 +2984,6 @@ vrb_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data, break; } avail -= 1; - rte_bbdev_log(INFO, "Op %d %d %d %d %d %d %d %d %d %d %d %d\n", i, ops[i]->ldpc_dec.op_flags, ops[i]->ldpc_dec.rv_index, ops[i]->ldpc_dec.iter_max, ops[i]->ldpc_dec.iter_count, @@ -2678,6 +3111,7 @@ vrb_dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, op->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0); op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); + op->status |= ((rsp.engine_hung) ? 
(1 << RTE_BBDEV_ENGINE_ERROR) : 0); if (desc->req.last_desc_in_batch) { (*aq_dequeued)++; @@ -2698,26 +3132,71 @@ vrb_dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, return desc->req.numCBs; } -/* Dequeue one LDPC encode operations from device in TB mode. - * That operation may cover multiple descriptors. - */ +/* Dequeue one LDPC encode operations from VRB2 device in TB mode. */ static inline int -vrb_dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, +vrb2_dequeue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, uint16_t *dequeued_ops, uint32_t *aq_dequeued, - uint16_t *dequeued_descs, uint16_t max_requested_ops) + uint16_t *dequeued_descs) { - union acc_dma_desc *desc, *last_desc, atom_desc; + union acc_dma_desc *desc, atom_desc; union acc_dma_rsp_desc rsp; struct rte_bbdev_enc_op *op; - uint8_t i = 0; - uint16_t current_dequeued_descs = 0, descs_in_tb; desc = acc_desc_tail(q, *dequeued_descs); atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED); - if (*dequeued_ops + 1 > max_requested_ops) - return -1; - + /* Check fdone bit. */ + if (!(atom_desc.rsp.val & ACC_FDONE)) + return -1; + + rsp.val = atom_desc.rsp.val; + rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val); + + /* Dequeue. */ + op = desc->req.op_addr; + + /* Clearing status, it will be set based on response. */ + op->status = 0; + op->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR; + op->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR; + op->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR; + op->status |= rsp.engine_hung << RTE_BBDEV_ENGINE_ERROR; + + if (desc->req.last_desc_in_batch) { + (*aq_dequeued)++; + desc->req.last_desc_in_batch = 0; + } + desc->rsp.val = ACC_DMA_DESC_TYPE; + desc->rsp.add_info_0 = 0; /* Reserved bits. */ + desc->rsp.add_info_1 = 0; /* Reserved bits. */ + + /* One op was successfully dequeued */ + ref_op[0] = op; + (*dequeued_descs)++; + (*dequeued_ops)++; + return 1; +} + +/* Dequeue one LDPC encode operations from device in TB mode. + * That operation may cover multiple descriptors. + */ +static inline int +vrb_dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, + uint16_t *dequeued_ops, uint32_t *aq_dequeued, + uint16_t *dequeued_descs, uint16_t max_requested_ops) +{ + union acc_dma_desc *desc, *last_desc, atom_desc; + union acc_dma_rsp_desc rsp; + struct rte_bbdev_enc_op *op; + uint8_t i = 0; + uint16_t current_dequeued_descs = 0, descs_in_tb; + + desc = acc_desc_tail(q, *dequeued_descs); + atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED); + + if (*dequeued_ops + 1 > max_requested_ops) + return -1; + /* Check fdone bit. */ if (!(atom_desc.rsp.val & ACC_FDONE)) return -1; @@ -2748,6 +3227,7 @@ vrb_dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, op->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0); op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); + op->status |= ((rsp.engine_hung) ? (1 << RTE_BBDEV_ENGINE_ERROR) : 0); if (desc->req.last_desc_in_batch) { (*aq_dequeued)++; @@ -2794,6 +3274,8 @@ vrb_dequeue_dec_one_op_cb(struct rte_bbdev_queue_data *q_data, op->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0); op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); op->status |= ((rsp.fcw_err) ? 
(1 << RTE_BBDEV_DRV_ERROR) : 0); + op->status |= rsp.engine_hung << RTE_BBDEV_ENGINE_ERROR; + if (op->status != 0) { /* These errors are not expected. */ q_data->queue_stats.dequeue_err_count++; @@ -2847,6 +3329,7 @@ vrb_dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data, op->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR; op->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR; op->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR; + op->status |= rsp.engine_hung << RTE_BBDEV_ENGINE_ERROR; if (op->status != 0) q_data->queue_stats.dequeue_err_count++; @@ -2928,6 +3411,7 @@ vrb_dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, op->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0); op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); + op->status |= ((rsp.engine_hung) ? (1 << RTE_BBDEV_ENGINE_ERROR) : 0); if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK)) tb_crc_check ^= desc->rsp.add_info_1; @@ -2979,7 +3463,6 @@ vrb_dequeue_enc(struct rte_bbdev_queue_data *q_data, if (avail == 0) return 0; op = acc_op_tail(q, 0); - cbm = op->turbo_enc.code_block_mode; for (i = 0; i < avail; i++) { @@ -3022,9 +3505,14 @@ vrb_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data, for (i = 0; i < avail; i++) { if (cbm == RTE_BBDEV_TRANSPORT_BLOCK) - ret = vrb_dequeue_enc_one_op_tb(q, &ops[dequeued_ops], - &dequeued_ops, &aq_dequeued, - &dequeued_descs, num); + if (q->d->device_variant == VRB1_VARIANT) + ret = vrb_dequeue_enc_one_op_tb(q, &ops[dequeued_ops], + &dequeued_ops, &aq_dequeued, + &dequeued_descs, num); + else + ret = vrb2_dequeue_ldpc_enc_one_op_tb(q, &ops[dequeued_ops], + &dequeued_ops, &aq_dequeued, + &dequeued_descs); else ret = vrb_dequeue_enc_one_op_cb(q, &ops[dequeued_ops], &dequeued_ops, &aq_dequeued, @@ -3154,63 +3642,139 @@ vrb1_fcw_fft_fill(struct rte_bbdev_fft_op *op, struct acc_fcw_fft *fcw) fcw->bypass = 0; } +/* Fill in a frame control word for FFT processing. 
*/ +static inline void +vrb2_fcw_fft_fill(struct rte_bbdev_fft_op *op, struct acc_fcw_fft_3 *fcw) +{ + fcw->in_frame_size = op->fft.input_sequence_size; + fcw->leading_pad_size = op->fft.input_leading_padding; + fcw->out_frame_size = op->fft.output_sequence_size; + fcw->leading_depad_size = op->fft.output_leading_depadding; + fcw->cs_window_sel = op->fft.window_index[0] + + (op->fft.window_index[1] << 8) + + (op->fft.window_index[2] << 16) + + (op->fft.window_index[3] << 24); + fcw->cs_window_sel2 = op->fft.window_index[4] + + (op->fft.window_index[5] << 8); + fcw->cs_enable_bmap = op->fft.cs_bitmap; + fcw->num_antennas = op->fft.num_antennas_log2; + fcw->idft_size = op->fft.idft_log2; + fcw->dft_size = op->fft.dft_log2; + fcw->cs_offset = op->fft.cs_time_adjustment; + fcw->idft_shift = op->fft.idft_shift; + fcw->dft_shift = op->fft.dft_shift; + fcw->cs_multiplier = op->fft.ncs_reciprocal; + fcw->power_shift = op->fft.power_shift; + fcw->exp_adj = op->fft.fp16_exp_adjust; + fcw->fp16_in = check_bit(op->fft.op_flags, RTE_BBDEV_FFT_FP16_INPUT); + fcw->fp16_out = check_bit(op->fft.op_flags, RTE_BBDEV_FFT_FP16_OUTPUT); + fcw->power_en = check_bit(op->fft.op_flags, RTE_BBDEV_FFT_POWER_MEAS); + if (check_bit(op->fft.op_flags, + RTE_BBDEV_FFT_IDFT_BYPASS)) { + if (check_bit(op->fft.op_flags, + RTE_BBDEV_FFT_WINDOWING_BYPASS)) + fcw->bypass = 2; + else + fcw->bypass = 1; + } else if (check_bit(op->fft.op_flags, + RTE_BBDEV_FFT_DFT_BYPASS)) + fcw->bypass = 3; + else + fcw->bypass = 0; +} + static inline int -vrb1_dma_desc_fft_fill(struct rte_bbdev_fft_op *op, +vrb_dma_desc_fft_fill(struct rte_bbdev_fft_op *op, struct acc_dma_req_desc *desc, - struct rte_mbuf *input, struct rte_mbuf *output, - uint32_t *in_offset, uint32_t *out_offset) + struct rte_mbuf *input, struct rte_mbuf *output, struct rte_mbuf *win_input, + struct rte_mbuf *pwr, uint32_t *in_offset, uint32_t *out_offset, + uint32_t *win_offset, uint32_t *pwr_offset, uint16_t device_variant) { - /* FCW already done. */ + bool pwr_en = check_bit(op->fft.op_flags, RTE_BBDEV_FFT_POWER_MEAS); + bool win_en = check_bit(op->fft.op_flags, RTE_BBDEV_FFT_DEWINDOWING); + int num_cs = 0, i, bd_idx = 1; + + if (device_variant == VRB1_VARIANT) { + /* Force unsupported descriptor format out. */ + pwr_en = 0; + win_en = 0; + } + + /* FCW already done */ acc_header_init(desc); - desc->data_ptrs[1].address = rte_pktmbuf_iova_offset(input, *in_offset); - desc->data_ptrs[1].blen = op->fft.input_sequence_size * 4; - desc->data_ptrs[1].blkid = ACC_DMA_BLKID_IN; - desc->data_ptrs[1].last = 1; - desc->data_ptrs[1].dma_ext = 0; - desc->data_ptrs[2].address = rte_pktmbuf_iova_offset(output, *out_offset); - desc->data_ptrs[2].blen = op->fft.output_sequence_size * 4; - desc->data_ptrs[2].blkid = ACC_DMA_BLKID_OUT_HARD; - desc->data_ptrs[2].last = 1; - desc->data_ptrs[2].dma_ext = 0; - desc->m2dlen = 2; - desc->d2mlen = 1; + + RTE_SET_USED(win_input); + RTE_SET_USED(win_offset); + + desc->data_ptrs[bd_idx].address = rte_pktmbuf_iova_offset(input, *in_offset); + desc->data_ptrs[bd_idx].blen = op->fft.input_sequence_size * ACC_IQ_SIZE; + desc->data_ptrs[bd_idx].blkid = ACC_DMA_BLKID_IN; + desc->data_ptrs[bd_idx].last = 1; + desc->data_ptrs[bd_idx].dma_ext = 0; + bd_idx++; + + desc->data_ptrs[bd_idx].address = rte_pktmbuf_iova_offset(output, *out_offset); + desc->data_ptrs[bd_idx].blen = op->fft.output_sequence_size * ACC_IQ_SIZE; + desc->data_ptrs[bd_idx].blkid = ACC_DMA_BLKID_OUT_HARD; + desc->data_ptrs[bd_idx].last = pwr_en ? 
0 : 1; + desc->data_ptrs[bd_idx].dma_ext = 0; + desc->m2dlen = win_en ? 3 : 2; + desc->d2mlen = pwr_en ? 2 : 1; desc->ib_ant_offset = op->fft.input_sequence_size; desc->num_ant = op->fft.num_antennas_log2 - 3; - int num_cs = 0, i; - for (i = 0; i < 12; i++) + + for (i = 0; i < RTE_BBDEV_MAX_CS; i++) if (check_bit(op->fft.cs_bitmap, 1 << i)) num_cs++; desc->num_cs = num_cs; + + if (pwr_en && pwr) { + bd_idx++; + desc->data_ptrs[bd_idx].address = rte_pktmbuf_iova_offset(pwr, *pwr_offset); + desc->data_ptrs[bd_idx].blen = num_cs * (1 << op->fft.num_antennas_log2) * 4; + desc->data_ptrs[bd_idx].blkid = ACC_DMA_BLKID_OUT_SOFT; + desc->data_ptrs[bd_idx].last = 1; + desc->data_ptrs[bd_idx].dma_ext = 0; + } desc->ob_cyc_offset = op->fft.output_sequence_size; desc->ob_ant_offset = op->fft.output_sequence_size * num_cs; desc->op_addr = op; return 0; } - /** Enqueue one FFT operation for device. */ static inline int vrb_enqueue_fft_one_op(struct acc_queue *q, struct rte_bbdev_fft_op *op, uint16_t total_enqueued_cbs) { union acc_dma_desc *desc; - struct rte_mbuf *input, *output; - uint32_t in_offset, out_offset; + struct rte_mbuf *input, *output, *pwr, *win; + uint32_t in_offset, out_offset, pwr_offset, win_offset; struct acc_fcw_fft *fcw; desc = acc_desc(q, total_enqueued_cbs); input = op->fft.base_input.data; output = op->fft.base_output.data; + pwr = op->fft.power_meas_output.data; + win = op->fft.dewindowing_input.data; in_offset = op->fft.base_input.offset; out_offset = op->fft.base_output.offset; - fcw = &desc->req.fcw_fft; + pwr_offset = op->fft.power_meas_output.offset; + win_offset = op->fft.dewindowing_input.offset; + + fcw = (struct acc_fcw_fft *) (q->fcw_ring + + ((q->sw_ring_head + total_enqueued_cbs) & q->sw_ring_wrap_mask) + * ACC_MAX_FCW_SIZE); - vrb1_fcw_fft_fill(op, fcw); - vrb1_dma_desc_fft_fill(op, &desc->req, input, output, &in_offset, &out_offset); + if (q->d->device_variant == VRB1_VARIANT) + vrb1_fcw_fft_fill(op, fcw); + else + vrb2_fcw_fft_fill(op, (struct acc_fcw_fft_3 *) fcw); + vrb_dma_desc_fft_fill(op, &desc->req, input, output, win, pwr, + &in_offset, &out_offset, &win_offset, &pwr_offset, q->d->device_variant); #ifdef RTE_LIBRTE_BBDEV_DEBUG - rte_memdump(stderr, "FCW", &desc->req.fcw_fft, - sizeof(desc->req.fcw_fft)); - rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc)); + rte_memdump(stderr, "FCW", fcw, 128); + rte_memdump(stderr, "Req Desc.", desc, 128); #endif return 1; } @@ -3283,6 +3847,7 @@ vrb_dequeue_fft_one_op(struct rte_bbdev_queue_data *q_data, op->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR; op->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR; op->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR; + op->status |= rsp.engine_hung << RTE_BBDEV_ENGINE_ERROR; if (op->status != 0) q_data->queue_stats.dequeue_err_count++; @@ -3329,6 +3894,367 @@ vrb_dequeue_fft(struct rte_bbdev_queue_data *q_data, return i; } +/* Fill in a frame control word for MLD-TS processing. 
*/ +static inline void +vrb2_fcw_mldts_fill(struct rte_bbdev_mldts_op *op, struct acc_fcw_mldts *fcw) +{ + fcw->nrb = op->mldts.num_rbs; + fcw->NLayers = op->mldts.num_layers - 1; + fcw->Qmod0 = (op->mldts.q_m[0] >> 1) - 1; + fcw->Qmod1 = (op->mldts.q_m[1] >> 1) - 1; + fcw->Qmod2 = (op->mldts.q_m[2] >> 1) - 1; + fcw->Qmod3 = (op->mldts.q_m[3] >> 1) - 1; + /* Mark some layers as disabled */ + if (op->mldts.num_layers == 2) { + fcw->Qmod2 = 3; + fcw->Qmod3 = 3; + } + if (op->mldts.num_layers == 3) + fcw->Qmod3 = 3; + fcw->Rrep = op->mldts.r_rep; + fcw->Crep = op->mldts.c_rep; +} + +/* Fill in descriptor for one MLD-TS processing operation. */ +static inline int +vrb2_dma_desc_mldts_fill(struct rte_bbdev_mldts_op *op, + struct acc_dma_req_desc *desc, + struct rte_mbuf *input_q, struct rte_mbuf *input_r, + struct rte_mbuf *output, + uint32_t *in_offset, uint32_t *out_offset) +{ + uint16_t qsize_per_re[VRB2_MLD_LAY_SIZE] = {8, 12, 16}; /* Layer 2 to 4. */ + uint16_t rsize_per_re[VRB2_MLD_LAY_SIZE] = {14, 26, 42}; + uint16_t sc_factor_per_rrep[VRB2_MLD_RREP_SIZE] = {12, 6, 4, 3, 0, 2}; + uint16_t i, outsize_per_re = 0; + uint32_t sc_num, r_num, q_size, r_size, out_size; + + /* Prevent out of range access. */ + if (op->mldts.r_rep > 5) + op->mldts.r_rep = 5; + if (op->mldts.num_layers < 2) + op->mldts.num_layers = 2; + if (op->mldts.num_layers > 4) + op->mldts.num_layers = 4; + for (i = 0; i < op->mldts.num_layers; i++) + outsize_per_re += op->mldts.q_m[i]; + sc_num = op->mldts.num_rbs * RTE_BBDEV_SCPERRB * (op->mldts.c_rep + 1); + r_num = op->mldts.num_rbs * sc_factor_per_rrep[op->mldts.r_rep]; + q_size = qsize_per_re[op->mldts.num_layers - 2] * sc_num; + r_size = rsize_per_re[op->mldts.num_layers - 2] * r_num; + out_size = sc_num * outsize_per_re; + + /* FCW already done. */ + acc_header_init(desc); + desc->data_ptrs[1].address = rte_pktmbuf_iova_offset(input_q, *in_offset); + desc->data_ptrs[1].blen = q_size; + desc->data_ptrs[1].blkid = ACC_DMA_BLKID_IN; + desc->data_ptrs[1].last = 0; + desc->data_ptrs[1].dma_ext = 0; + desc->data_ptrs[2].address = rte_pktmbuf_iova_offset(input_r, *in_offset); + desc->data_ptrs[2].blen = r_size; + desc->data_ptrs[2].blkid = ACC_DMA_BLKID_IN_MLD_R; + desc->data_ptrs[2].last = 1; + desc->data_ptrs[2].dma_ext = 0; + desc->data_ptrs[3].address = rte_pktmbuf_iova_offset(output, *out_offset); + desc->data_ptrs[3].blen = out_size; + desc->data_ptrs[3].blkid = ACC_DMA_BLKID_OUT_HARD; + desc->data_ptrs[3].last = 1; + desc->data_ptrs[3].dma_ext = 0; + desc->m2dlen = 3; + desc->d2mlen = 1; + desc->op_addr = op; + desc->cbs_in_tb = 1; + + return 0; +} + +/* Check whether the MLD operation can be processed as a single operation. */ +static inline bool +vrb2_check_mld_r_constraint(struct rte_bbdev_mldts_op *op) { + uint8_t layer_idx, rrep_idx; + uint16_t max_rb[VRB2_MLD_LAY_SIZE][VRB2_MLD_RREP_SIZE] = { + {188, 275, 275, 275, 0, 275}, + {101, 202, 275, 275, 0, 275}, + {62, 124, 186, 248, 0, 275} }; + + if (op->mldts.c_rep == 0) + return true; + + layer_idx = RTE_MIN(op->mldts.num_layers - VRB2_MLD_MIN_LAYER, + VRB2_MLD_MAX_LAYER - VRB2_MLD_MIN_LAYER); + rrep_idx = RTE_MIN(op->mldts.r_rep, VRB2_MLD_MAX_RREP); + rte_bbdev_log_debug("RB %d index %d %d max %d\n", op->mldts.num_rbs, layer_idx, rrep_idx, + max_rb[layer_idx][rrep_idx]); + + return (op->mldts.num_rbs <= max_rb[layer_idx][rrep_idx]); +} + +/** Enqueue MLDTS operation split across symbols. 
*/ +static inline int +enqueue_mldts_split_op(struct acc_queue *q, struct rte_bbdev_mldts_op *op, + uint16_t total_enqueued_descs) +{ + uint16_t qsize_per_re[VRB2_MLD_LAY_SIZE] = {8, 12, 16}; /* Layer 2 to 4. */ + uint16_t rsize_per_re[VRB2_MLD_LAY_SIZE] = {14, 26, 42}; + uint16_t sc_factor_per_rrep[VRB2_MLD_RREP_SIZE] = {12, 6, 4, 3, 0, 2}; + uint32_t i, outsize_per_re = 0, sc_num, r_num, q_size, r_size, out_size, num_syms; + union acc_dma_desc *desc, *first_desc; + uint16_t desc_idx, symb; + struct rte_mbuf *input_q, *input_r, *output; + uint32_t in_offset, out_offset; + struct acc_fcw_mldts *fcw; + + desc_idx = acc_desc_idx(q, total_enqueued_descs); + first_desc = q->ring_addr + desc_idx; + input_q = op->mldts.qhy_input.data; + input_r = op->mldts.r_input.data; + output = op->mldts.output.data; + in_offset = op->mldts.qhy_input.offset; + out_offset = op->mldts.output.offset; + num_syms = op->mldts.c_rep + 1; + fcw = &first_desc->req.fcw_mldts; + vrb2_fcw_mldts_fill(op, fcw); + fcw->Crep = 0; /* C rep forced to zero. */ + + /* Prevent out of range access. */ + if (op->mldts.r_rep > 5) + op->mldts.r_rep = 5; + if (op->mldts.num_layers < 2) + op->mldts.num_layers = 2; + if (op->mldts.num_layers > 4) + op->mldts.num_layers = 4; + + for (i = 0; i < op->mldts.num_layers; i++) + outsize_per_re += op->mldts.q_m[i]; + sc_num = op->mldts.num_rbs * RTE_BBDEV_SCPERRB; /* C rep forced to zero. */ + r_num = op->mldts.num_rbs * sc_factor_per_rrep[op->mldts.r_rep]; + q_size = qsize_per_re[op->mldts.num_layers - 2] * sc_num; + r_size = rsize_per_re[op->mldts.num_layers - 2] * r_num; + out_size = sc_num * outsize_per_re; + + for (symb = 0; symb < num_syms; symb++) { + desc_idx = ((q->sw_ring_head + total_enqueued_descs + symb) & q->sw_ring_wrap_mask); + desc = q->ring_addr + desc_idx; + acc_header_init(&desc->req); + if (symb == 0) + desc->req.cbs_in_tb = num_syms; + else + rte_memcpy(&desc->req.fcw_mldts, fcw, ACC_FCW_MLDTS_BLEN); + desc->req.data_ptrs[1].address = rte_pktmbuf_iova_offset(input_q, in_offset); + desc->req.data_ptrs[1].blen = q_size; + in_offset += q_size; + desc->req.data_ptrs[1].blkid = ACC_DMA_BLKID_IN; + desc->req.data_ptrs[1].last = 0; + desc->req.data_ptrs[1].dma_ext = 0; + desc->req.data_ptrs[2].address = rte_pktmbuf_iova_offset(input_r, 0); + desc->req.data_ptrs[2].blen = r_size; + desc->req.data_ptrs[2].blkid = ACC_DMA_BLKID_IN_MLD_R; + desc->req.data_ptrs[2].last = 1; + desc->req.data_ptrs[2].dma_ext = 0; + desc->req.data_ptrs[3].address = rte_pktmbuf_iova_offset(output, out_offset); + desc->req.data_ptrs[3].blen = out_size; + out_offset += out_size; + desc->req.data_ptrs[3].blkid = ACC_DMA_BLKID_OUT_HARD; + desc->req.data_ptrs[3].last = 1; + desc->req.data_ptrs[3].dma_ext = 0; + desc->req.m2dlen = VRB2_MLD_M2DLEN; + desc->req.d2mlen = 1; + desc->req.op_addr = op; + +#ifdef RTE_LIBRTE_BBDEV_DEBUG + rte_memdump(stderr, "FCW", &desc->req.fcw_mldts, sizeof(desc->req.fcw_mldts)); + rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc)); +#endif + } + desc->req.sdone_enable = 0; + + return num_syms; +} + +/** Enqueue one MLDTS operation. 
*/ +static inline int +enqueue_mldts_one_op(struct acc_queue *q, struct rte_bbdev_mldts_op *op, + uint16_t total_enqueued_descs) +{ + union acc_dma_desc *desc; + struct rte_mbuf *input_q, *input_r, *output; + uint32_t in_offset, out_offset; + struct acc_fcw_mldts *fcw; + + desc = acc_desc(q, total_enqueued_descs); + input_q = op->mldts.qhy_input.data; + input_r = op->mldts.r_input.data; + output = op->mldts.output.data; + in_offset = op->mldts.qhy_input.offset; + out_offset = op->mldts.output.offset; + fcw = &desc->req.fcw_mldts; + vrb2_fcw_mldts_fill(op, fcw); + vrb2_dma_desc_mldts_fill(op, &desc->req, input_q, input_r, output, + &in_offset, &out_offset); +#ifdef RTE_LIBRTE_BBDEV_DEBUG + rte_memdump(stderr, "FCW", &desc->req.fcw_mldts, sizeof(desc->req.fcw_mldts)); + rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc)); +#endif + return 1; +} + +/* Enqueue MLDTS operations. */ +static uint16_t +vrb2_enqueue_mldts(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_mldts_op **ops, uint16_t num) +{ + int32_t aq_avail, avail; + struct acc_queue *q = q_data->queue_private; + uint16_t i, enqueued_descs = 0, descs_in_op; + int ret; + bool as_one_op; + + aq_avail = acc_aq_avail(q_data, num); + if (unlikely((aq_avail <= 0) || (num == 0))) + return 0; + avail = acc_ring_avail_enq(q); + + for (i = 0; i < num; ++i) { + as_one_op = vrb2_check_mld_r_constraint(ops[i]); + descs_in_op = as_one_op ? 1 : ops[i]->mldts.c_rep + 1; + + /* Check if there are available space for further processing. */ + if (unlikely(avail < descs_in_op)) { + acc_enqueue_ring_full(q_data); + break; + } + avail -= descs_in_op; + + if (as_one_op) + ret = enqueue_mldts_one_op(q, ops[i], enqueued_descs); + else + ret = enqueue_mldts_split_op(q, ops[i], enqueued_descs); + + if (ret < 0) { + acc_enqueue_invalid(q_data); + break; + } + + enqueued_descs += ret; + } + + if (unlikely(i == 0)) + return 0; /* Nothing to enqueue. */ + + acc_dma_enqueue(q, enqueued_descs, &q_data->queue_stats); + + /* Update stats. */ + q_data->queue_stats.enqueued_count += i; + q_data->queue_stats.enqueue_err_count += num - i; + return i; +} + +/* + * Dequeue one MLDTS operation. + * This may have been split over multiple descriptors. + */ +static inline int +dequeue_mldts_one_op(struct rte_bbdev_queue_data *q_data, + struct acc_queue *q, struct rte_bbdev_mldts_op **ref_op, + uint16_t dequeued_ops, uint32_t *aq_dequeued) +{ + union acc_dma_desc *desc, atom_desc, *last_desc; + union acc_dma_rsp_desc rsp; + struct rte_bbdev_mldts_op *op; + uint8_t descs_in_op, i; + + desc = acc_desc_tail(q, dequeued_ops); + atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED); + + /* Check fdone bit. */ + if (!(atom_desc.rsp.val & ACC_FDONE)) + return -1; + + descs_in_op = desc->req.cbs_in_tb; + if (descs_in_op > 1) { + /* Get last CB. */ + last_desc = acc_desc_tail(q, dequeued_ops + descs_in_op - 1); + /* Check if last op is ready to dequeue by checking fdone bit. If not exit. */ + atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED); + if (!(atom_desc.rsp.val & ACC_FDONE)) + return -1; +#ifdef RTE_LIBRTE_BBDEV_DEBUG + rte_memdump(stderr, "Last Resp", &last_desc->rsp.val, sizeof(desc->rsp.val)); +#endif + /* Check each operation iteratively using fdone. 
*/ + for (i = 1; i < descs_in_op - 1; i++) { + last_desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i) + & q->sw_ring_wrap_mask); + atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, + __ATOMIC_RELAXED); + if (!(atom_desc.rsp.val & ACC_FDONE)) + return -1; + } + } +#ifdef RTE_LIBRTE_BBDEV_DEBUG + rte_memdump(stderr, "Resp", &desc->rsp.val, sizeof(desc->rsp.val)); +#endif + /* Dequeue. */ + op = desc->req.op_addr; + + /* Clearing status, it will be set based on response. */ + op->status = 0; + + for (i = 0; i < descs_in_op; i++) { + desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i) & q->sw_ring_wrap_mask); + atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED); + rsp.val = atom_desc.rsp.val; + op->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR; + op->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR; + op->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR; + op->status |= rsp.engine_hung << RTE_BBDEV_ENGINE_ERROR; + } + + if (op->status != 0) + q_data->queue_stats.dequeue_err_count++; + if (op->status & (1 << RTE_BBDEV_DRV_ERROR)) + vrb_check_ir(q->d); + + /* Check if this is the last desc in batch (Atomic Queue). */ + if (desc->req.last_desc_in_batch) { + (*aq_dequeued)++; + desc->req.last_desc_in_batch = 0; + } + desc->rsp.val = ACC_DMA_DESC_TYPE; + desc->rsp.add_info_0 = 0; + *ref_op = op; + + return descs_in_op; +} + +/* Dequeue MLDTS operations from VRB2 device. */ +static uint16_t +vrb2_dequeue_mldts(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_mldts_op **ops, uint16_t num) +{ + struct acc_queue *q = q_data->queue_private; + uint16_t dequeue_num, i, dequeued_cbs = 0; + uint32_t avail = acc_ring_avail_deq(q); + uint32_t aq_dequeued = 0; + int ret; + + dequeue_num = RTE_MIN(avail, num); + + for (i = 0; i < dequeue_num; ++i) { + ret = dequeue_mldts_one_op(q_data, q, &ops[i], dequeued_cbs, &aq_dequeued); + if (ret <= 0) + break; + dequeued_cbs += ret; + } + + q->aq_dequeued += aq_dequeued; + q->sw_ring_tail += dequeued_cbs; + /* Update enqueue stats. 
*/ + q_data->queue_stats.dequeued_count += i; + return i; +} + /* Initialization Function */ static void vrb_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv) @@ -3347,6 +4273,8 @@ vrb_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv) dev->dequeue_ldpc_dec_ops = vrb_dequeue_ldpc_dec; dev->enqueue_fft_ops = vrb_enqueue_fft; dev->dequeue_fft_ops = vrb_dequeue_fft; + dev->enqueue_mldts_ops = vrb2_enqueue_mldts; + dev->dequeue_mldts_ops = vrb2_dequeue_mldts; d->pf_device = !strcmp(drv->driver.name, RTE_STR(VRB_PF_DRIVER_NAME)); d->mmio_base = pci_dev->mem_resource[0].addr; @@ -3356,13 +4284,21 @@ vrb_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv) (pci_dev->id.device_id == RTE_VRB1_VF_DEVICE_ID)) { d->device_variant = VRB1_VARIANT; d->queue_offset = vrb1_queue_offset; - d->fcw_ld_fill = vrb1_fcw_ld_fill; d->num_qgroups = VRB1_NUM_QGRPS; d->num_aqs = VRB1_NUM_AQS; if (d->pf_device) d->reg_addr = &vrb1_pf_reg_addr; else d->reg_addr = &vrb1_vf_reg_addr; + } else { + d->device_variant = VRB2_VARIANT; + d->queue_offset = vrb2_queue_offset; + d->num_qgroups = VRB2_NUM_QGRPS; + d->num_aqs = VRB2_NUM_AQS; + if (d->pf_device) + d->reg_addr = &vrb2_pf_reg_addr; + else + d->reg_addr = &vrb2_vf_reg_addr; } rte_bbdev_log_debug("Init device %s [%s] @ vaddr %p paddr %#"PRIx64"", @@ -3838,3 +4774,324 @@ vrb1_configure(const char *dev_name, struct rte_acc_conf *conf) rte_bbdev_log_debug("PF Tip configuration complete for %s", dev_name); return 0; } + +/* Initial configuration of a VRB2 device prior to running configure(). */ +int +vrb2_configure(const char *dev_name, struct rte_acc_conf *conf) +{ + rte_bbdev_log(INFO, "vrb2_configure"); + uint32_t value, address, status; + int qg_idx, template_idx, vf_idx, acc, i, aq_reg, static_allocation, numEngines; + int numQgs, numQqsAcc, totalQgs; + int qman_func_id[8] = {0, 2, 1, 3, 4, 5, 0, 0}; + struct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name); + int rlim, alen, timestamp; + + /* Compile time checks. */ + RTE_BUILD_BUG_ON(sizeof(struct acc_dma_req_desc) != 256); + RTE_BUILD_BUG_ON(sizeof(union acc_dma_desc) != 256); + RTE_BUILD_BUG_ON(sizeof(struct acc_fcw_td) != 24); + RTE_BUILD_BUG_ON(sizeof(struct acc_fcw_te) != 32); + + if (bbdev == NULL) { + rte_bbdev_log(ERR, + "Invalid dev_name (%s), or device is not yet initialised", + dev_name); + return -ENODEV; + } + struct acc_device *d = bbdev->data->dev_private; + + /* Store configuration. */ + rte_memcpy(&d->acc_conf, conf, sizeof(d->acc_conf)); + + /* Explicitly releasing AXI as this may be stopped after PF FLR/BME. */ + address = VRB2_PfDmaAxiControl; + value = 1; + acc_reg_write(d, address, value); + + /* Set the fabric mode. */ + address = VRB2_PfFabricM2iBufferReg; + value = VRB2_FABRIC_MODE; + acc_reg_write(d, address, value); + + /* Set default descriptor signature. */ + address = VRB2_PfDmaDescriptorSignature; + value = 0; + acc_reg_write(d, address, value); + + /* Enable the Error Detection in DMA. */ + value = VRB2_CFG_DMA_ERROR; + address = VRB2_PfDmaErrorDetectionEn; + acc_reg_write(d, address, value); + + /* AXI Cache configuration. */ + value = VRB2_CFG_AXI_CACHE; + address = VRB2_PfDmaAxcacheReg; + acc_reg_write(d, address, value); + + /* AXI Response configuration. */ + acc_reg_write(d, VRB2_PfDmaCfgRrespBresp, 0x0); + + /* Default DMA Configuration (Qmgr Enabled) */ + acc_reg_write(d, VRB2_PfDmaConfig0Reg, 0); + acc_reg_write(d, VRB2_PfDmaQmanenSelect, 0xFFFFFFFF); + acc_reg_write(d, VRB2_PfDmaQmanen, 0); + + /* Default RLIM/ALEN configuration. 
*/ + rlim = 0; + alen = 3; + timestamp = 0; + address = VRB2_PfDmaConfig1Reg; + value = (1 << 31) + (rlim << 8) + (timestamp << 6) + alen; + acc_reg_write(d, address, value); + + /* Default FFT configuration. */ + for (template_idx = 0; template_idx < VRB2_FFT_NUM; template_idx++) { + acc_reg_write(d, VRB2_PfFftConfig0 + template_idx * 0x1000, VRB2_FFT_CFG_0); + acc_reg_write(d, VRB2_PfFftParityMask8 + template_idx * 0x1000, VRB2_FFT_ECC); + } + + /* Configure DMA Qmanager addresses. */ + address = VRB2_PfDmaQmgrAddrReg; + value = VRB2_PfQmgrEgressQueuesTemplate; + acc_reg_write(d, address, value); + + /* ===== Qmgr Configuration ===== */ + /* Configuration of the AQueue Depth QMGR_GRP_0_DEPTH_LOG2 for UL. */ + totalQgs = conf->q_ul_4g.num_qgroups + conf->q_ul_5g.num_qgroups + + conf->q_dl_4g.num_qgroups + conf->q_dl_5g.num_qgroups + + conf->q_fft.num_qgroups + conf->q_mld.num_qgroups; + for (qg_idx = 0; qg_idx < VRB2_NUM_QGRPS; qg_idx++) { + address = VRB2_PfQmgrDepthLog2Grp + ACC_BYTES_IN_WORD * qg_idx; + value = aqDepth(qg_idx, conf); + acc_reg_write(d, address, value); + address = VRB2_PfQmgrTholdGrp + ACC_BYTES_IN_WORD * qg_idx; + value = (1 << 16) + (1 << (aqDepth(qg_idx, conf) - 1)); + acc_reg_write(d, address, value); + } + + /* Template Priority in incremental order. */ + for (template_idx = 0; template_idx < ACC_NUM_TMPL; template_idx++) { + address = VRB2_PfQmgrGrpTmplateReg0Indx + ACC_BYTES_IN_WORD * template_idx; + value = ACC_TMPL_PRI_0; + acc_reg_write(d, address, value); + address = VRB2_PfQmgrGrpTmplateReg1Indx + ACC_BYTES_IN_WORD * template_idx; + value = ACC_TMPL_PRI_1; + acc_reg_write(d, address, value); + address = VRB2_PfQmgrGrpTmplateReg2Indx + ACC_BYTES_IN_WORD * template_idx; + value = ACC_TMPL_PRI_2; + acc_reg_write(d, address, value); + address = VRB2_PfQmgrGrpTmplateReg3Indx + ACC_BYTES_IN_WORD * template_idx; + value = ACC_TMPL_PRI_3; + acc_reg_write(d, address, value); + address = VRB2_PfQmgrGrpTmplateReg4Indx + ACC_BYTES_IN_WORD * template_idx; + value = ACC_TMPL_PRI_4; + acc_reg_write(d, address, value); + address = VRB2_PfQmgrGrpTmplateReg5Indx + ACC_BYTES_IN_WORD * template_idx; + value = ACC_TMPL_PRI_5; + acc_reg_write(d, address, value); + address = VRB2_PfQmgrGrpTmplateReg6Indx + ACC_BYTES_IN_WORD * template_idx; + value = ACC_TMPL_PRI_6; + acc_reg_write(d, address, value); + address = VRB2_PfQmgrGrpTmplateReg7Indx + ACC_BYTES_IN_WORD * template_idx; + value = ACC_TMPL_PRI_7; + acc_reg_write(d, address, value); + } + + address = VRB2_PfQmgrGrpPriority; + value = VRB2_CFG_QMGR_HI_P; + acc_reg_write(d, address, value); + + /* Template Configuration. 
*/ + for (template_idx = 0; template_idx < ACC_NUM_TMPL; template_idx++) { + value = 0; + address = VRB2_PfQmgrGrpTmplateEnRegIndx + ACC_BYTES_IN_WORD * template_idx; + acc_reg_write(d, address, value); + } + /* 4GUL */ + numQgs = conf->q_ul_4g.num_qgroups; + numQqsAcc = 0; + value = 0; + for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++) + value |= (1 << qg_idx); + for (template_idx = VRB2_SIG_UL_4G; template_idx <= VRB2_SIG_UL_4G_LAST; + template_idx++) { + address = VRB2_PfQmgrGrpTmplateEnRegIndx + ACC_BYTES_IN_WORD * template_idx; + acc_reg_write(d, address, value); + } + /* 5GUL */ + numQqsAcc += numQgs; + numQgs = conf->q_ul_5g.num_qgroups; + value = 0; + numEngines = 0; + for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++) + value |= (1 << qg_idx); + for (template_idx = VRB2_SIG_UL_5G; template_idx <= VRB2_SIG_UL_5G_LAST; + template_idx++) { + /* Check engine power-on status. */ + address = VRB2_PfFecUl5gIbDebug0Reg + ACC_ENGINE_OFFSET * template_idx; + status = (acc_reg_read(d, address) >> 4) & 0x7; + address = VRB2_PfQmgrGrpTmplateEnRegIndx + ACC_BYTES_IN_WORD * template_idx; + if (status == 1) { + acc_reg_write(d, address, value); + numEngines++; + } else + acc_reg_write(d, address, 0); + } + rte_bbdev_log(INFO, "Number of 5GUL engines %d", numEngines); + /* 4GDL */ + numQqsAcc += numQgs; + numQgs = conf->q_dl_4g.num_qgroups; + value = 0; + for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++) + value |= (1 << qg_idx); + for (template_idx = VRB2_SIG_DL_4G; template_idx <= VRB2_SIG_DL_4G_LAST; + template_idx++) { + address = VRB2_PfQmgrGrpTmplateEnRegIndx + ACC_BYTES_IN_WORD * template_idx; + acc_reg_write(d, address, value); + } + /* 5GDL */ + numQqsAcc += numQgs; + numQgs = conf->q_dl_5g.num_qgroups; + value = 0; + for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++) + value |= (1 << qg_idx); + for (template_idx = VRB2_SIG_DL_5G; template_idx <= VRB2_SIG_DL_5G_LAST; + template_idx++) { + address = VRB2_PfQmgrGrpTmplateEnRegIndx + ACC_BYTES_IN_WORD * template_idx; + acc_reg_write(d, address, value); + } + /* FFT */ + numQqsAcc += numQgs; + numQgs = conf->q_fft.num_qgroups; + value = 0; + for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++) + value |= (1 << qg_idx); + for (template_idx = VRB2_SIG_FFT; template_idx <= VRB2_SIG_FFT_LAST; + template_idx++) { + address = VRB2_PfQmgrGrpTmplateEnRegIndx + ACC_BYTES_IN_WORD * template_idx; + acc_reg_write(d, address, value); + } + /* MLD */ + numQqsAcc += numQgs; + numQgs = conf->q_mld.num_qgroups; + value = 0; + for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++) + value |= (1 << qg_idx); + for (template_idx = VRB2_SIG_MLD; template_idx <= VRB2_SIG_MLD_LAST; + template_idx++) { + address = VRB2_PfQmgrGrpTmplateEnRegIndx + + ACC_BYTES_IN_WORD * template_idx; + acc_reg_write(d, address, value); + } + + /* Queue Group Function mapping. */ + for (i = 0; i < 4; i++) { + value = 0; + for (qg_idx = 0; qg_idx < ACC_NUM_QGRPS_PER_WORD; qg_idx++) { + acc = accFromQgid(qg_idx + i * ACC_NUM_QGRPS_PER_WORD, conf); + value |= qman_func_id[acc] << (qg_idx * 4); + } + acc_reg_write(d, VRB2_PfQmgrGrpFunction0 + i * ACC_BYTES_IN_WORD, value); + } + + /* Configuration of the Arbitration QGroup depth to 1. 
*/ + for (qg_idx = 0; qg_idx < VRB2_NUM_QGRPS; qg_idx++) { + address = VRB2_PfQmgrArbQDepthGrp + ACC_BYTES_IN_WORD * qg_idx; + value = 0; + acc_reg_write(d, address, value); + } + + static_allocation = 1; + if (static_allocation == 1) { + /* This pointer to ARAM (512kB) is shifted by 2 (4B per register). */ + uint32_t aram_address = 0; + for (qg_idx = 0; qg_idx < totalQgs; qg_idx++) { + for (vf_idx = 0; vf_idx < conf->num_vf_bundles; vf_idx++) { + address = VRB2_PfQmgrVfBaseAddr + vf_idx + * ACC_BYTES_IN_WORD + qg_idx + * ACC_BYTES_IN_WORD * 64; + value = aram_address; + acc_reg_fast_write(d, address, value); + /* Offset ARAM Address for next memory bank - increment of 4B. */ + aram_address += aqNum(qg_idx, conf) * + (1 << aqDepth(qg_idx, conf)); + } + } + if (aram_address > VRB2_WORDS_IN_ARAM_SIZE) { + rte_bbdev_log(ERR, "ARAM Configuration not fitting %d %d\n", + aram_address, VRB2_WORDS_IN_ARAM_SIZE); + return -EINVAL; + } + } else { + /* Dynamic Qmgr allocation. */ + acc_reg_write(d, VRB2_PfQmgrAramAllocEn, 1); + acc_reg_write(d, VRB2_PfQmgrAramAllocSetupN0, 0x1000); + acc_reg_write(d, VRB2_PfQmgrAramAllocSetupN1, 0); + acc_reg_write(d, VRB2_PfQmgrAramAllocSetupN2, 0); + acc_reg_write(d, VRB2_PfQmgrAramAllocSetupN3, 0); + acc_reg_write(d, VRB2_PfQmgrSoftReset, 1); + acc_reg_write(d, VRB2_PfQmgrSoftReset, 0); + } + + /* ==== HI Configuration ==== */ + + /* No Info Ring/MSI by default. */ + address = VRB2_PfHiInfoRingIntWrEnRegPf; + value = 0; + acc_reg_write(d, address, value); + address = VRB2_PfHiCfgMsiIntWrEnRegPf; + value = 0xFFFFFFFF; + acc_reg_write(d, address, value); + /* Prevent Block on Transmit Error. */ + address = VRB2_PfHiBlockTransmitOnErrorEn; + value = 0; + acc_reg_write(d, address, value); + /* Prevents to drop MSI */ + address = VRB2_PfHiMsiDropEnableReg; + value = 0; + acc_reg_write(d, address, value); + /* Set the PF Mode register */ + address = VRB2_PfHiPfMode; + value = ((conf->pf_mode_en) ? ACC_PF_VAL : 0) | 0x1F07F0; + acc_reg_write(d, address, value); + /* Explicitly releasing AXI after PF Mode. */ + acc_reg_write(d, VRB2_PfDmaAxiControl, 1); + + /* QoS overflow init. */ + value = 1; + address = VRB2_PfQosmonAEvalOverflow0; + acc_reg_write(d, address, value); + address = VRB2_PfQosmonBEvalOverflow0; + acc_reg_write(d, address, value); + + /* Enabling AQueues through the Queue hierarchy. 
*/ + unsigned int en_bitmask[VRB2_AQ_REG_NUM]; + for (vf_idx = 0; vf_idx < VRB2_NUM_VFS; vf_idx++) { + for (qg_idx = 0; qg_idx < VRB2_NUM_QGRPS; qg_idx++) { + for (aq_reg = 0; aq_reg < VRB2_AQ_REG_NUM; aq_reg++) + en_bitmask[aq_reg] = 0; + if (vf_idx < conf->num_vf_bundles && qg_idx < totalQgs) { + for (aq_reg = 0; aq_reg < VRB2_AQ_REG_NUM; aq_reg++) { + if (aqNum(qg_idx, conf) >= 16 * (aq_reg + 1)) + en_bitmask[aq_reg] = 0xFFFF; + else if (aqNum(qg_idx, conf) <= 16 * aq_reg) + en_bitmask[aq_reg] = 0x0; + else + en_bitmask[aq_reg] = (1 << (aqNum(qg_idx, + conf) - aq_reg * 16)) - 1; + } + } + for (aq_reg = 0; aq_reg < VRB2_AQ_REG_NUM; aq_reg++) { + address = VRB2_PfQmgrAqEnableVf + vf_idx * 16 + aq_reg * 4; + value = (qg_idx << 16) + en_bitmask[aq_reg]; + acc_reg_fast_write(d, address, value); + } + } + } + + rte_bbdev_log(INFO, + "VRB2 basic config complete for %s - pf_bb_config should ideally be used instead", + dev_name); + return 0; +} diff --git a/drivers/baseband/acc/vrb1_pf_enum.h b/drivers/baseband/acc/vrb1_pf_enum.h index 82a36685e9e..6dc359800fb 100644 --- a/drivers/baseband/acc/vrb1_pf_enum.h +++ b/drivers/baseband/acc/vrb1_pf_enum.h @@ -98,11 +98,18 @@ enum { ACC_PF_INT_DMA_UL5G_DESC_IRQ = 8, ACC_PF_INT_DMA_DL5G_DESC_IRQ = 9, ACC_PF_INT_DMA_MLD_DESC_IRQ = 10, - ACC_PF_INT_ARAM_ECC_1BIT_ERR = 11, - ACC_PF_INT_PARITY_ERR = 12, - ACC_PF_INT_QMGR_ERR = 13, - ACC_PF_INT_INT_REQ_OVERFLOW = 14, - ACC_PF_INT_APB_TIMEOUT = 15, + ACC_PF_INT_ARAM_ACCESS_ERR = 11, + ACC_PF_INT_ARAM_ECC_1BIT_ERR = 12, + ACC_PF_INT_PARITY_ERR = 13, + ACC_PF_INT_QMGR_OVERFLOW = 14, + ACC_PF_INT_QMGR_ERR = 15, + ACC_PF_INT_ATS_ERR = 22, + ACC_PF_INT_ARAM_FUUL = 23, + ACC_PF_INT_EXTRA_READ = 24, + ACC_PF_INT_COMPLETION_TIMEOUT = 25, + ACC_PF_INT_CORE_HANG = 26, + ACC_PF_INT_DMA_HANG = 28, + ACC_PF_INT_DS_HANG = 27, }; #endif /* VRB1_PF_ENUM_H */ diff --git a/drivers/baseband/acc/vrb2_pf_enum.h b/drivers/baseband/acc/vrb2_pf_enum.h new file mode 100644 index 00000000000..28f10dc35b8 --- /dev/null +++ b/drivers/baseband/acc/vrb2_pf_enum.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#ifndef VRB2_PF_ENUM_H +#define VRB2_PF_ENUM_H + +/* + * VRB2 Register mapping on PF BAR0 + * This is automatically generated from RDL, format may change with new RDL + * Release. 
+ * Variable names are as is + */ +enum { + VRB2_PfQmgrEgressQueuesTemplate = 0x0007FC00, + VRB2_PfQmgrIngressAq = 0x00100000, + VRB2_PfQmgrSoftReset = 0x00A00034, + VRB2_PfQmgrAramAllocEn = 0x00A000a0, + VRB2_PfQmgrAramAllocSetupN0 = 0x00A000b0, + VRB2_PfQmgrAramAllocSetupN1 = 0x00A000b4, + VRB2_PfQmgrAramAllocSetupN2 = 0x00A000b8, + VRB2_PfQmgrAramAllocSetupN3 = 0x00A000bc, + VRB2_PfQmgrDepthLog2Grp = 0x00A00200, + VRB2_PfQmgrTholdGrp = 0x00A00300, + VRB2_PfQmgrGrpTmplateReg0Indx = 0x00A00600, + VRB2_PfQmgrGrpTmplateReg1Indx = 0x00A00700, + VRB2_PfQmgrGrpTmplateReg2Indx = 0x00A00800, + VRB2_PfQmgrGrpTmplateReg3Indx = 0x00A00900, + VRB2_PfQmgrGrpTmplateReg4Indx = 0x00A00A00, + VRB2_PfQmgrGrpTmplateReg5Indx = 0x00A00B00, + VRB2_PfQmgrGrpTmplateReg6Indx = 0x00A00C00, + VRB2_PfQmgrGrpTmplateReg7Indx = 0x00A00D00, + VRB2_PfQmgrGrpTmplateEnRegIndx = 0x00A00E00, + VRB2_PfQmgrArbQDepthGrp = 0x00A02F00, + VRB2_PfQmgrGrpFunction0 = 0x00A02F80, + VRB2_PfQmgrGrpPriority = 0x00A02FC0, + VRB2_PfQmgrVfBaseAddr = 0x00A08000, + VRB2_PfQmgrAqEnableVf = 0x00A10000, + VRB2_PfQmgrRingSizeVf = 0x00A20010, + VRB2_PfQmgrGrpDepthLog20Vf = 0x00A20020, + VRB2_PfQmgrGrpDepthLog21Vf = 0x00A20024, + VRB2_PfFabricM2iBufferReg = 0x00B30000, + VRB2_PfFecUl5gIbDebug0Reg = 0x00B401FC, + VRB2_PfFftConfig0 = 0x00B58004, + VRB2_PfFftParityMask8 = 0x00B5803C, + VRB2_PfDmaConfig0Reg = 0x00B80000, + VRB2_PfDmaConfig1Reg = 0x00B80004, + VRB2_PfDmaQmgrAddrReg = 0x00B80008, + VRB2_PfDmaAxcacheReg = 0x00B80010, + VRB2_PfDmaAxiControl = 0x00B8002C, + VRB2_PfDmaQmanen = 0x00B80040, + VRB2_PfDmaQmanenSelect = 0x00B80044, + VRB2_PfDmaCfgRrespBresp = 0x00B80814, + VRB2_PfDmaDescriptorSignature = 0x00B80868, + VRB2_PfDmaErrorDetectionEn = 0x00B80870, + VRB2_PfDmaFec5GulDescBaseLoRegVf = 0x00B88020, + VRB2_PfDmaFec5GulDescBaseHiRegVf = 0x00B88024, + VRB2_PfDmaFec5GulRespPtrLoRegVf = 0x00B88028, + VRB2_PfDmaFec5GulRespPtrHiRegVf = 0x00B8802C, + VRB2_PfDmaFec5GdlDescBaseLoRegVf = 0x00B88040, + VRB2_PfDmaFec5GdlDescBaseHiRegVf = 0x00B88044, + VRB2_PfDmaFec5GdlRespPtrLoRegVf = 0x00B88048, + VRB2_PfDmaFec5GdlRespPtrHiRegVf = 0x00B8804C, + VRB2_PfDmaFec4GulDescBaseLoRegVf = 0x00B88060, + VRB2_PfDmaFec4GulDescBaseHiRegVf = 0x00B88064, + VRB2_PfDmaFec4GulRespPtrLoRegVf = 0x00B88068, + VRB2_PfDmaFec4GulRespPtrHiRegVf = 0x00B8806C, + VRB2_PfDmaFec4GdlDescBaseLoRegVf = 0x00B88080, + VRB2_PfDmaFec4GdlDescBaseHiRegVf = 0x00B88084, + VRB2_PfDmaFec4GdlRespPtrLoRegVf = 0x00B88088, + VRB2_PfDmaFec4GdlRespPtrHiRegVf = 0x00B8808C, + VRB2_PfDmaFftDescBaseLoRegVf = 0x00B880A0, + VRB2_PfDmaFftDescBaseHiRegVf = 0x00B880A4, + VRB2_PfDmaFftRespPtrLoRegVf = 0x00B880A8, + VRB2_PfDmaFftRespPtrHiRegVf = 0x00B880AC, + VRB2_PfDmaMldDescBaseLoRegVf = 0x00B880C0, + VRB2_PfDmaMldDescBaseHiRegVf = 0x00B880C4, + VRB2_PfQosmonAEvalOverflow0 = 0x00B90008, + VRB2_PfPermonACntrlRegVf = 0x00B98000, + VRB2_PfQosmonBEvalOverflow0 = 0x00BA0008, + VRB2_PfPermonBCntrlRegVf = 0x00BA8000, + VRB2_PfPermonCCntrlRegVf = 0x00BB8000, + VRB2_PfHiInfoRingBaseLoRegPf = 0x00C84014, + VRB2_PfHiInfoRingBaseHiRegPf = 0x00C84018, + VRB2_PfHiInfoRingPointerRegPf = 0x00C8401C, + VRB2_PfHiInfoRingIntWrEnRegPf = 0x00C84020, + VRB2_PfHiBlockTransmitOnErrorEn = 0x00C84038, + VRB2_PfHiCfgMsiIntWrEnRegPf = 0x00C84040, + VRB2_PfHiMsixVectorMapperPf = 0x00C84060, + VRB2_PfHiPfMode = 0x00C84108, + VRB2_PfHiClkGateHystReg = 0x00C8410C, + VRB2_PfHiMsiDropEnableReg = 0x00C84114, + VRB2_PfHiSectionPowerGatingReq = 0x00C84128, + VRB2_PfHiSectionPowerGatingAck = 0x00C8412C, +}; + +/* TIP PF Interrupt numbers */ 
+enum { + VRB2_PF_INT_QMGR_AQ_OVERFLOW = 0, + VRB2_PF_INT_DOORBELL_VF_2_PF = 1, + VRB2_PF_INT_ILLEGAL_FORMAT = 2, + VRB2_PF_INT_QMGR_DISABLED_ACCESS = 3, + VRB2_PF_INT_QMGR_AQ_OVERTHRESHOLD = 4, + VRB2_PF_INT_DMA_DL_DESC_IRQ = 5, + VRB2_PF_INT_DMA_UL_DESC_IRQ = 6, + VRB2_PF_INT_DMA_FFT_DESC_IRQ = 7, + VRB2_PF_INT_DMA_UL5G_DESC_IRQ = 8, + VRB2_PF_INT_DMA_DL5G_DESC_IRQ = 9, + VRB2_PF_INT_DMA_MLD_DESC_IRQ = 10, + VRB2_PF_INT_ARAM_ACCESS_ERR = 11, + VRB2_PF_INT_ARAM_ECC_1BIT_ERR = 12, + VRB2_PF_INT_PARITY_ERR = 13, + VRB2_PF_INT_QMGR_OVERFLOW = 14, + VRB2_PF_INT_QMGR_ERR = 15, + VRB2_PF_INT_ATS_ERR = 22, + VRB2_PF_INT_ARAM_FUUL = 23, + VRB2_PF_INT_EXTRA_READ = 24, + VRB2_PF_INT_COMPLETION_TIMEOUT = 25, + VRB2_PF_INT_CORE_HANG = 26, + VRB2_PF_INT_DMA_HANG = 28, + VRB2_PF_INT_DS_HANG = 27, +}; + +#endif /* VRB2_PF_ENUM_H */ diff --git a/drivers/baseband/acc/vrb2_vf_enum.h b/drivers/baseband/acc/vrb2_vf_enum.h new file mode 100644 index 00000000000..9c6e4510101 --- /dev/null +++ b/drivers/baseband/acc/vrb2_vf_enum.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#ifndef VRB2_VF_ENUM_H +#define VRB2_VF_ENUM_H + +/* + * VRB2 Register mapping on VF BAR0 + * This is automatically generated from RDL, format may change with new RDL + */ +enum { + VRB2_VfHiVfToPfDbellVf = 0x00000000, + VRB2_VfHiPfToVfDbellVf = 0x00000008, + VRB2_VfHiInfoRingBaseLoVf = 0x00000010, + VRB2_VfHiInfoRingBaseHiVf = 0x00000014, + VRB2_VfHiInfoRingPointerVf = 0x00000018, + VRB2_VfHiInfoRingIntWrEnVf = 0x00000020, + VRB2_VfHiInfoRingPf2VfWrEnVf = 0x00000024, + VRB2_VfHiMsixVectorMapperVf = 0x00000060, + VRB2_VfHiDeviceStatus = 0x00000068, + VRB2_VfHiInterruptSrc = 0x00000070, + VRB2_VfDmaFec5GulDescBaseLoRegVf = 0x00000120, + VRB2_VfDmaFec5GulDescBaseHiRegVf = 0x00000124, + VRB2_VfDmaFec5GulRespPtrLoRegVf = 0x00000128, + VRB2_VfDmaFec5GulRespPtrHiRegVf = 0x0000012C, + VRB2_VfDmaFec5GdlDescBaseLoRegVf = 0x00000140, + VRB2_VfDmaFec5GdlDescBaseHiRegVf = 0x00000144, + VRB2_VfDmaFec5GdlRespPtrLoRegVf = 0x00000148, + VRB2_VfDmaFec5GdlRespPtrHiRegVf = 0x0000014C, + VRB2_VfDmaFec4GulDescBaseLoRegVf = 0x00000160, + VRB2_VfDmaFec4GulDescBaseHiRegVf = 0x00000164, + VRB2_VfDmaFec4GulRespPtrLoRegVf = 0x00000168, + VRB2_VfDmaFec4GulRespPtrHiRegVf = 0x0000016C, + VRB2_VfDmaFec4GdlDescBaseLoRegVf = 0x00000180, + VRB2_VfDmaFec4GdlDescBaseHiRegVf = 0x00000184, + VRB2_VfDmaFec4GdlRespPtrLoRegVf = 0x00000188, + VRB2_VfDmaFec4GdlRespPtrHiRegVf = 0x0000018C, + VRB2_VfDmaFftDescBaseLoRegVf = 0x000001A0, + VRB2_VfDmaFftDescBaseHiRegVf = 0x000001A4, + VRB2_VfDmaFftRespPtrLoRegVf = 0x000001A8, + VRB2_VfDmaFftRespPtrHiRegVf = 0x000001AC, + VRB2_VfDmaMldDescBaseLoRegVf = 0x000001C0, + VRB2_VfDmaMldDescBaseHiRegVf = 0x000001C4, + VRB2_VfDmaMldRespPtrLoRegVf = 0x000001C8, + VRB2_VfDmaMldRespPtrHiRegVf = 0x000001CC, + VRB2_VfPmACntrlRegVf = 0x00000200, + VRB2_VfPmACountVf = 0x00000208, + VRB2_VfPmAKCntLoVf = 0x00000210, + VRB2_VfPmAKCntHiVf = 0x00000214, + VRB2_VfPmADeltaCntLoVf = 0x00000220, + VRB2_VfPmADeltaCntHiVf = 0x00000224, + VRB2_VfPmBCntrlRegVf = 0x00000240, + VRB2_VfPmBCountVf = 0x00000248, + VRB2_VfPmBKCntLoVf = 0x00000250, + VRB2_VfPmBKCntHiVf = 0x00000254, + VRB2_VfPmBDeltaCntLoVf = 0x00000260, + VRB2_VfPmBDeltaCntHiVf = 0x00000264, + VRB2_VfPmCCntrlRegVf = 0x00000280, + VRB2_VfPmCCountVf = 0x00000288, + VRB2_VfPmCKCntLoVf = 0x00000290, + VRB2_VfPmCKCntHiVf = 0x00000294, + VRB2_VfPmCDeltaCntLoVf = 0x000002A0, + VRB2_VfPmCDeltaCntHiVf = 0x000002A4, + VRB2_VfPmDCntrlRegVf = 0x000002C0, + 
VRB2_VfPmDCountVf = 0x000002C8, + VRB2_VfPmDKCntLoVf = 0x000002D0, + VRB2_VfPmDKCntHiVf = 0x000002D4, + VRB2_VfPmDDeltaCntLoVf = 0x000002E0, + VRB2_VfPmDDeltaCntHiVf = 0x000002E4, + VRB2_VfPmECntrlRegVf = 0x00000300, + VRB2_VfPmECountVf = 0x00000308, + VRB2_VfPmEKCntLoVf = 0x00000310, + VRB2_VfPmEKCntHiVf = 0x00000314, + VRB2_VfPmEDeltaCntLoVf = 0x00000320, + VRB2_VfPmEDeltaCntHiVf = 0x00000324, + VRB2_VfPmFCntrlRegVf = 0x00000340, + VRB2_VfPmFCountVf = 0x00000348, + VRB2_VfPmFKCntLoVf = 0x00000350, + VRB2_VfPmFKCntHiVf = 0x00000354, + VRB2_VfPmFDeltaCntLoVf = 0x00000360, + VRB2_VfPmFDeltaCntHiVf = 0x00000364, + VRB2_VfQmgrAqReset0 = 0x00000600, + VRB2_VfQmgrAqReset1 = 0x00000604, + VRB2_VfQmgrAqReset2 = 0x00000608, + VRB2_VfQmgrAqReset3 = 0x0000060C, + VRB2_VfQmgrRingSizeVf = 0x00000610, + VRB2_VfQmgrGrpDepthLog20Vf = 0x00000620, + VRB2_VfQmgrGrpDepthLog21Vf = 0x00000624, + VRB2_VfQmgrGrpDepthLog22Vf = 0x00000628, + VRB2_VfQmgrGrpDepthLog23Vf = 0x0000062C, + VRB2_VfQmgrGrpFunction0Vf = 0x00000630, + VRB2_VfQmgrGrpFunction1Vf = 0x00000634, + VRB2_VfQmgrAramUsageN0 = 0x00000640, + VRB2_VfQmgrAramUsageN1 = 0x00000644, + VRB2_VfQmgrAramUsageN2 = 0x00000648, + VRB2_VfQmgrAramUsageN3 = 0x0000064C, + VRB2_VfHiMSIXBaseLoRegVf = 0x00001000, + VRB2_VfHiMSIXBaseHiRegVf = 0x00001004, + VRB2_VfHiMSIXBaseDataRegVf = 0x00001008, + VRB2_VfHiMSIXBaseMaskRegVf = 0x0000100C, + VRB2_VfHiMSIXPBABaseLoRegVf = 0x00003000, + VRB2_VfQmgrIngressAq = 0x00004000, +}; + +/* TIP VF Interrupt numbers */ +enum { + VRB2_VF_INT_QMGR_AQ_OVERFLOW = 0, + VRB2_VF_INT_DOORBELL_PF_2_VF = 1, + VRB2_VF_INT_ILLEGAL_FORMAT = 2, + VRB2_VF_INT_QMGR_DISABLED_ACCESS = 3, + VRB2_VF_INT_QMGR_AQ_OVERTHRESHOLD = 4, + VRB2_VF_INT_DMA_DL_DESC_IRQ = 5, + VRB2_VF_INT_DMA_UL_DESC_IRQ = 6, + VRB2_VF_INT_DMA_FFT_DESC_IRQ = 7, + VRB2_VF_INT_DMA_UL5G_DESC_IRQ = 8, + VRB2_VF_INT_DMA_DL5G_DESC_IRQ = 9, + VRB2_VF_INT_DMA_MLD_DESC_IRQ = 10, +}; + +#endif /* VRB2_VF_ENUM_H */ diff --git a/drivers/baseband/acc/vrb_cfg.h b/drivers/baseband/acc/vrb_cfg.h index e3c8902b463..79487c4e047 100644 --- a/drivers/baseband/acc/vrb_cfg.h +++ b/drivers/baseband/acc/vrb_cfg.h @@ -29,4 +29,20 @@ int vrb1_configure(const char *dev_name, struct rte_acc_conf *conf); +/** + * Configure a VRB2 device. + * + * @param dev_name + * The name of the device. This is the short form of PCI BDF, e.g. 00:01.0. + * It can also be retrieved for a bbdev device from the dev_name field in the + * rte_bbdev_info structure returned by rte_bbdev_info_get(). + * @param conf + * Configuration to apply to VRB2 HW. + * + * @return + * Zero on success, negative value on failure. 
+ */ +int +vrb2_configure(const char *dev_name, struct rte_acc_conf *conf); + #endif /* _VRB_CFG_H_ */ diff --git a/drivers/baseband/acc/vrb_pmd.h b/drivers/baseband/acc/vrb_pmd.h index 01028273e77..0371db9972e 100644 --- a/drivers/baseband/acc/vrb_pmd.h +++ b/drivers/baseband/acc/vrb_pmd.h @@ -8,6 +8,8 @@ #include "acc_common.h" #include "vrb1_pf_enum.h" #include "vrb1_vf_enum.h" +#include "vrb2_pf_enum.h" +#include "vrb2_vf_enum.h" #include "vrb_cfg.h" /* Helper macro for logging */ @@ -31,12 +33,13 @@ #define RTE_VRB1_VENDOR_ID (0x8086) #define RTE_VRB1_PF_DEVICE_ID (0x57C0) #define RTE_VRB1_VF_DEVICE_ID (0x57C1) - -#define VRB1_VARIANT 2 +#define RTE_VRB2_VENDOR_ID (0x8086) +#define RTE_VRB2_PF_DEVICE_ID (0x57C2) +#define RTE_VRB2_VF_DEVICE_ID (0x57C3) #define VRB_NUM_ACCS 6 #define VRB_MAX_QGRPS 32 -#define VRB_MAX_AQS 32 +#define VRB_MAX_AQS 64 #define ACC_STATUS_WAIT 10 #define ACC_STATUS_TO 100 @@ -46,8 +49,6 @@ #define VRB1_NUM_VFS 16 #define VRB1_NUM_QGRPS 16 #define VRB1_NUM_AQS 16 -#define VRB1_GRP_ID_SHIFT 10 /* Queue Index Hierarchy */ -#define VRB1_VF_ID_SHIFT 4 /* Queue Index Hierarchy */ #define VRB1_WORDS_IN_ARAM_SIZE (256 * 1024 / 4) /* VRB1 Mapping of signals for the available engines */ @@ -61,7 +62,6 @@ #define VRB1_SIG_DL_4G_LAST 23 #define VRB1_SIG_FFT 24 #define VRB1_SIG_FFT_LAST 24 - #define VRB1_NUM_ACCS 5 /* VRB1 Configuration */ @@ -90,6 +90,67 @@ #define VRB1_MAX_PF_MSIX (256+32) #define VRB1_MAX_VF_MSIX (256+7) +/* VRB2 specific flags */ + +#define VRB2_NUM_VFS 64 +#define VRB2_NUM_QGRPS 32 +#define VRB2_NUM_AQS 64 +#define VRB2_WORDS_IN_ARAM_SIZE (512 * 1024 / 4) +#define VRB2_NUM_ACCS 6 +#define VRB2_AQ_REG_NUM 4 + +/* VRB2 Mapping of signals for the available engines */ +#define VRB2_SIG_UL_5G 0 +#define VRB2_SIG_UL_5G_LAST 5 +#define VRB2_SIG_DL_5G 9 +#define VRB2_SIG_DL_5G_LAST 11 +#define VRB2_SIG_UL_4G 12 +#define VRB2_SIG_UL_4G_LAST 16 +#define VRB2_SIG_DL_4G 21 +#define VRB2_SIG_DL_4G_LAST 23 +#define VRB2_SIG_FFT 24 +#define VRB2_SIG_FFT_LAST 26 +#define VRB2_SIG_MLD 30 +#define VRB2_SIG_MLD_LAST 31 +#define VRB2_FFT_NUM 3 + +#define VRB2_FCW_MLDTS_BLEN 32 +#define VRB2_MLD_MIN_LAYER 2 +#define VRB2_MLD_MAX_LAYER 4 +#define VRB2_MLD_MAX_RREP 5 +#define VRB2_MLD_LAY_SIZE 3 +#define VRB2_MLD_RREP_SIZE 6 +#define VRB2_MLD_M2DLEN 3 + +#define VRB2_MAX_PF_MSIX (256+32) +#define VRB2_MAX_VF_MSIX (64+7) +#define VRB2_REG_IRQ_EN_ALL 0xFFFFFFFF /* Enable all interrupts */ +#define VRB2_FABRIC_MODE 0x8000103 +#define VRB2_CFG_DMA_ERROR 0x7DF +#define VRB2_CFG_AXI_CACHE 0x11 +#define VRB2_CFG_QMGR_HI_P 0x0F0F +#define VRB2_RESET_HARD 0x1FF +#define VRB2_ENGINES_MAX 9 +#define VRB2_GPEX_AXIMAP_NUM 17 +#define VRB2_CLOCK_GATING_EN 0x30000 +#define VRB2_FFT_CFG_0 0x2001 +#define VRB2_FFT_ECC 0x60 +#define VRB2_FFT_RAM_EN 0x80008000 +#define VRB2_FFT_RAM_DIS 0x0 +#define VRB2_FFT_RAM_SIZE 512 +#define VRB2_CLK_EN 0x00010A01 +#define VRB2_CLK_DIS 0x01F10A01 +#define VRB2_PG_MASK_0 0x1F +#define VRB2_PG_MASK_1 0xF +#define VRB2_PG_MASK_2 0x1 +#define VRB2_PG_MASK_3 0x0 +#define VRB2_PG_MASK_FFT 1 +#define VRB2_PG_MASK_4GUL 4 +#define VRB2_PG_MASK_5GUL 8 +#define VRB2_PF_PM_REG_OFFSET 0x10000 +#define VRB2_VF_PM_REG_OFFSET 0x40 +#define VRB2_PM_START 0x2 + struct acc_registry_addr { unsigned int dma_ring_dl5g_hi; unsigned int dma_ring_dl5g_lo; @@ -101,6 +162,8 @@ struct acc_registry_addr { unsigned int dma_ring_ul4g_lo; unsigned int dma_ring_fft_hi; unsigned int dma_ring_fft_lo; + unsigned int dma_ring_mld_hi; + unsigned int dma_ring_mld_lo; unsigned int ring_size; 
unsigned int info_ring_hi; unsigned int info_ring_lo; @@ -116,6 +179,8 @@ struct acc_registry_addr { unsigned int tail_ptrs_ul4g_lo; unsigned int tail_ptrs_fft_hi; unsigned int tail_ptrs_fft_lo; + unsigned int tail_ptrs_mld_hi; + unsigned int tail_ptrs_mld_lo; unsigned int depth_log0_offset; unsigned int depth_log1_offset; unsigned int qman_group_func; @@ -140,6 +205,8 @@ static const struct acc_registry_addr vrb1_pf_reg_addr = { .dma_ring_ul4g_lo = VRB1_PfDmaFec4GulDescBaseLoRegVf, .dma_ring_fft_hi = VRB1_PfDmaFftDescBaseHiRegVf, .dma_ring_fft_lo = VRB1_PfDmaFftDescBaseLoRegVf, + .dma_ring_mld_hi = 0, + .dma_ring_mld_lo = 0, .ring_size = VRB1_PfQmgrRingSizeVf, .info_ring_hi = VRB1_PfHiInfoRingBaseHiRegPf, .info_ring_lo = VRB1_PfHiInfoRingBaseLoRegPf, @@ -155,6 +222,8 @@ static const struct acc_registry_addr vrb1_pf_reg_addr = { .tail_ptrs_ul4g_lo = VRB1_PfDmaFec4GulRespPtrLoRegVf, .tail_ptrs_fft_hi = VRB1_PfDmaFftRespPtrHiRegVf, .tail_ptrs_fft_lo = VRB1_PfDmaFftRespPtrLoRegVf, + .tail_ptrs_mld_hi = 0, + .tail_ptrs_mld_lo = 0, .depth_log0_offset = VRB1_PfQmgrGrpDepthLog20Vf, .depth_log1_offset = VRB1_PfQmgrGrpDepthLog21Vf, .qman_group_func = VRB1_PfQmgrGrpFunction0, @@ -179,6 +248,8 @@ static const struct acc_registry_addr vrb1_vf_reg_addr = { .dma_ring_ul4g_lo = VRB1_VfDmaFec4GulDescBaseLoRegVf, .dma_ring_fft_hi = VRB1_VfDmaFftDescBaseHiRegVf, .dma_ring_fft_lo = VRB1_VfDmaFftDescBaseLoRegVf, + .dma_ring_mld_hi = 0, + .dma_ring_mld_lo = 0, .ring_size = VRB1_VfQmgrRingSizeVf, .info_ring_hi = VRB1_VfHiInfoRingBaseHiVf, .info_ring_lo = VRB1_VfHiInfoRingBaseLoVf, @@ -194,6 +265,8 @@ static const struct acc_registry_addr vrb1_vf_reg_addr = { .tail_ptrs_ul4g_lo = VRB1_VfDmaFec4GulRespPtrLoRegVf, .tail_ptrs_fft_hi = VRB1_VfDmaFftRespPtrHiRegVf, .tail_ptrs_fft_lo = VRB1_VfDmaFftRespPtrLoRegVf, + .tail_ptrs_mld_hi = 0, + .tail_ptrs_mld_lo = 0, .depth_log0_offset = VRB1_VfQmgrGrpDepthLog20Vf, .depth_log1_offset = VRB1_VfQmgrGrpDepthLog21Vf, .qman_group_func = VRB1_VfQmgrGrpFunction0Vf, @@ -206,4 +279,92 @@ static const struct acc_registry_addr vrb1_vf_reg_addr = { .pf2vf_doorbell = VRB1_VfHiPfToVfDbellVf, }; + +/* Structure holding registry addresses for PF */ +static const struct acc_registry_addr vrb2_pf_reg_addr = { + .dma_ring_dl5g_hi = VRB2_PfDmaFec5GdlDescBaseHiRegVf, + .dma_ring_dl5g_lo = VRB2_PfDmaFec5GdlDescBaseLoRegVf, + .dma_ring_ul5g_hi = VRB2_PfDmaFec5GulDescBaseHiRegVf, + .dma_ring_ul5g_lo = VRB2_PfDmaFec5GulDescBaseLoRegVf, + .dma_ring_dl4g_hi = VRB2_PfDmaFec4GdlDescBaseHiRegVf, + .dma_ring_dl4g_lo = VRB2_PfDmaFec4GdlDescBaseLoRegVf, + .dma_ring_ul4g_hi = VRB2_PfDmaFec4GulDescBaseHiRegVf, + .dma_ring_ul4g_lo = VRB2_PfDmaFec4GulDescBaseLoRegVf, + .dma_ring_fft_hi = VRB2_PfDmaFftDescBaseHiRegVf, + .dma_ring_fft_lo = VRB2_PfDmaFftDescBaseLoRegVf, + .dma_ring_mld_hi = VRB2_PfDmaMldDescBaseHiRegVf, + .dma_ring_mld_lo = VRB2_PfDmaMldDescBaseLoRegVf, + .ring_size = VRB2_PfQmgrRingSizeVf, + .info_ring_hi = VRB2_PfHiInfoRingBaseHiRegPf, + .info_ring_lo = VRB2_PfHiInfoRingBaseLoRegPf, + .info_ring_en = VRB2_PfHiInfoRingIntWrEnRegPf, + .info_ring_ptr = VRB2_PfHiInfoRingPointerRegPf, + .tail_ptrs_dl5g_hi = VRB2_PfDmaFec5GdlRespPtrHiRegVf, + .tail_ptrs_dl5g_lo = VRB2_PfDmaFec5GdlRespPtrLoRegVf, + .tail_ptrs_ul5g_hi = VRB2_PfDmaFec5GulRespPtrHiRegVf, + .tail_ptrs_ul5g_lo = VRB2_PfDmaFec5GulRespPtrLoRegVf, + .tail_ptrs_dl4g_hi = VRB2_PfDmaFec4GdlRespPtrHiRegVf, + .tail_ptrs_dl4g_lo = VRB2_PfDmaFec4GdlRespPtrLoRegVf, + .tail_ptrs_ul4g_hi = VRB2_PfDmaFec4GulRespPtrHiRegVf, + .tail_ptrs_ul4g_lo = 
VRB2_PfDmaFec4GulRespPtrLoRegVf, + .tail_ptrs_fft_hi = VRB2_PfDmaFftRespPtrHiRegVf, + .tail_ptrs_fft_lo = VRB2_PfDmaFftRespPtrLoRegVf, + .tail_ptrs_mld_hi = VRB2_PfDmaFftRespPtrHiRegVf, + .tail_ptrs_mld_lo = VRB2_PfDmaFftRespPtrLoRegVf, + .depth_log0_offset = VRB2_PfQmgrGrpDepthLog20Vf, + .depth_log1_offset = VRB2_PfQmgrGrpDepthLog21Vf, + .qman_group_func = VRB2_PfQmgrGrpFunction0, + .hi_mode = VRB2_PfHiMsixVectorMapperPf, + .pf_mode = VRB2_PfHiPfMode, + .pmon_ctrl_a = VRB2_PfPermonACntrlRegVf, + .pmon_ctrl_b = VRB2_PfPermonBCntrlRegVf, + .pmon_ctrl_c = VRB2_PfPermonCCntrlRegVf, + .vf2pf_doorbell = 0, + .pf2vf_doorbell = 0, +}; + +/* Structure holding registry addresses for VF */ +static const struct acc_registry_addr vrb2_vf_reg_addr = { + .dma_ring_dl5g_hi = VRB2_VfDmaFec5GdlDescBaseHiRegVf, + .dma_ring_dl5g_lo = VRB2_VfDmaFec5GdlDescBaseLoRegVf, + .dma_ring_ul5g_hi = VRB2_VfDmaFec5GulDescBaseHiRegVf, + .dma_ring_ul5g_lo = VRB2_VfDmaFec5GulDescBaseLoRegVf, + .dma_ring_dl4g_hi = VRB2_VfDmaFec4GdlDescBaseHiRegVf, + .dma_ring_dl4g_lo = VRB2_VfDmaFec4GdlDescBaseLoRegVf, + .dma_ring_ul4g_hi = VRB2_VfDmaFec4GulDescBaseHiRegVf, + .dma_ring_ul4g_lo = VRB2_VfDmaFec4GulDescBaseLoRegVf, + .dma_ring_fft_hi = VRB2_VfDmaFftDescBaseHiRegVf, + .dma_ring_fft_lo = VRB2_VfDmaFftDescBaseLoRegVf, + .dma_ring_mld_hi = VRB2_VfDmaMldDescBaseHiRegVf, + .dma_ring_mld_lo = VRB2_VfDmaMldDescBaseLoRegVf, + .ring_size = VRB2_VfQmgrRingSizeVf, + .info_ring_hi = VRB2_VfHiInfoRingBaseHiVf, + .info_ring_lo = VRB2_VfHiInfoRingBaseLoVf, + .info_ring_en = VRB2_VfHiInfoRingIntWrEnVf, + .info_ring_ptr = VRB2_VfHiInfoRingPointerVf, + .tail_ptrs_dl5g_hi = VRB2_VfDmaFec5GdlRespPtrHiRegVf, + .tail_ptrs_dl5g_lo = VRB2_VfDmaFec5GdlRespPtrLoRegVf, + .tail_ptrs_ul5g_hi = VRB2_VfDmaFec5GulRespPtrHiRegVf, + .tail_ptrs_ul5g_lo = VRB2_VfDmaFec5GulRespPtrLoRegVf, + .tail_ptrs_dl4g_hi = VRB2_VfDmaFec4GdlRespPtrHiRegVf, + .tail_ptrs_dl4g_lo = VRB2_VfDmaFec4GdlRespPtrLoRegVf, + .tail_ptrs_ul4g_hi = VRB2_VfDmaFec4GulRespPtrHiRegVf, + .tail_ptrs_ul4g_lo = VRB2_VfDmaFec4GulRespPtrLoRegVf, + .tail_ptrs_fft_hi = VRB2_VfDmaFftRespPtrHiRegVf, + .tail_ptrs_fft_lo = VRB2_VfDmaFftRespPtrLoRegVf, + .tail_ptrs_mld_hi = VRB2_VfDmaMldRespPtrHiRegVf, + .tail_ptrs_mld_lo = VRB2_VfDmaMldRespPtrLoRegVf, + .depth_log0_offset = VRB2_VfQmgrGrpDepthLog20Vf, + .depth_log1_offset = VRB2_VfQmgrGrpDepthLog21Vf, + .qman_group_func = VRB2_VfQmgrGrpFunction0Vf, + .hi_mode = VRB2_VfHiMsixVectorMapperVf, + .pf_mode = 0, + .pmon_ctrl_a = VRB2_VfPmACntrlRegVf, + .pmon_ctrl_b = VRB2_VfPmBCntrlRegVf, + .pmon_ctrl_c = VRB2_VfPmCCntrlRegVf, + .vf2pf_doorbell = VRB2_VfHiVfToPfDbellVf, + .pf2vf_doorbell = VRB2_VfHiPfToVfDbellVf, +}; + + #endif /* _VRB_PMD_H_ */ diff --git a/drivers/baseband/fpga_5gnr_fec/meson.build b/drivers/baseband/fpga_5gnr_fec/meson.build index 745cd271f28..c3678d23ebf 100644 --- a/drivers/baseband/fpga_5gnr_fec/meson.build +++ b/drivers/baseband/fpga_5gnr_fec/meson.build @@ -1,7 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2020 Intel Corporation -deps += ['bbdev', 'bus_vdev', 'ring', 'pci', 'bus_pci'] +deps += ['bus_vdev', 'ring', 'pci', 'bus_pci'] sources = files('rte_fpga_5gnr_fec.c') diff --git a/drivers/baseband/fpga_lte_fec/meson.build b/drivers/baseband/fpga_lte_fec/meson.build index e00688da336..14e07826ef7 100644 --- a/drivers/baseband/fpga_lte_fec/meson.build +++ b/drivers/baseband/fpga_lte_fec/meson.build @@ -1,5 +1,5 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2019 Intel Corporation -deps += ['bbdev', 'bus_vdev', 
'ring', 'pci', 'bus_pci'] +deps += ['bus_vdev', 'ring', 'pci', 'bus_pci'] sources = files('fpga_lte_fec.c') diff --git a/drivers/baseband/la12xx/meson.build b/drivers/baseband/la12xx/meson.build index 7a017dcffab..7b7e41c961d 100644 --- a/drivers/baseband/la12xx/meson.build +++ b/drivers/baseband/la12xx/meson.build @@ -1,6 +1,6 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2021 NXP -deps += ['bbdev', 'bus_vdev', 'ring'] +deps += ['bus_vdev', 'ring'] sources = files('bbdev_la12xx.c') diff --git a/drivers/baseband/meson.build b/drivers/baseband/meson.build index 1d732da8822..3420d98564e 100644 --- a/drivers/baseband/meson.build +++ b/drivers/baseband/meson.build @@ -14,4 +14,6 @@ drivers = [ 'turbo_sw', ] +std_deps = [ 'bbdev' ] + log_prefix = 'pmd.bb' diff --git a/drivers/baseband/null/meson.build b/drivers/baseband/null/meson.build index 02ef7db573b..22863f0bd82 100644 --- a/drivers/baseband/null/meson.build +++ b/drivers/baseband/null/meson.build @@ -1,5 +1,5 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2018 Luca Boccassi -deps += ['bbdev', 'bus_vdev', 'ring'] +deps += ['bus_vdev', 'ring'] sources = files('bbdev_null.c') diff --git a/drivers/baseband/turbo_sw/meson.build b/drivers/baseband/turbo_sw/meson.build index aeb9a76f9ea..a9035a753e9 100644 --- a/drivers/baseband/turbo_sw/meson.build +++ b/drivers/baseband/turbo_sw/meson.build @@ -26,5 +26,5 @@ if dep_dec5g.found() cflags += ['-DRTE_BBDEV_SDK_AVX512'] endif -deps += ['bbdev', 'bus_vdev', 'ring'] +deps += ['bus_vdev', 'ring'] sources = files('bbdev_turbo_software.c') diff --git a/drivers/bus/cdx/bus_cdx_driver.h b/drivers/bus/cdx/bus_cdx_driver.h index fcacdb58962..1c9a64c87ac 100644 --- a/drivers/bus/cdx/bus_cdx_driver.h +++ b/drivers/bus/cdx/bus_cdx_driver.h @@ -37,6 +37,9 @@ struct rte_cdx_bus; static const char DRV_EXP_TAG(name, cdx_tbl_export)[] __rte_used = \ RTE_STR(table) +/** Device needs resource mapping */ +#define RTE_CDX_DRV_NEED_MAPPING 0x0001 + /** * A structure describing an ID for a CDX driver. Each driver provides a * table of these IDs for each device that it supports. diff --git a/drivers/bus/cdx/cdx.c b/drivers/bus/cdx/cdx.c index f9526e08ccd..541aae76c3f 100644 --- a/drivers/bus/cdx/cdx.c +++ b/drivers/bus/cdx/cdx.c @@ -383,10 +383,12 @@ cdx_probe_one_driver(struct rte_cdx_driver *dr, CDX_BUS_DEBUG(" probe device %s using driver: %s", dev_name, dr->driver.name); - ret = cdx_vfio_map_resource(dev); - if (ret != 0) { - CDX_BUS_ERR("CDX map device failed: %d", ret); - goto error_map_device; + if (dr->drv_flags & RTE_CDX_DRV_NEED_MAPPING) { + ret = cdx_vfio_map_resource(dev); + if (ret != 0) { + CDX_BUS_ERR("CDX map device failed: %d", ret); + goto error_map_device; + } } /* call the driver probe() function */ diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c index 3949bf87129..83db0a534e5 100644 --- a/drivers/bus/dpaa/base/qbman/qman.c +++ b/drivers/bus/dpaa/base/qbman/qman.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2008-2016 Freescale Semiconductor Inc. 
- * Copyright 2017,2019 NXP + * Copyright 2017,2019-2023 NXP + * */ @@ -897,7 +897,7 @@ static u32 __poll_portal_slow(struct qman_portal *p, u32 is) /* Lookup in the retirement table */ fq = table_find_fq(p, be32_to_cpu(msg->fq.fqid)); - DPAA_BUG_ON(!fq); + DPAA_BUG_ON(fq != NULL); fq_state_change(p, fq, &swapped_msg, verb); if (fq->cb.fqs) fq->cb.fqs(p, fq, &swapped_msg); @@ -909,6 +909,7 @@ static u32 __poll_portal_slow(struct qman_portal *p, u32 is) #else fq = (void *)(uintptr_t)msg->fq.contextB; #endif + DPAA_BUG_ON(fq != NULL); fq_state_change(p, fq, msg, verb); if (fq->cb.fqs) fq->cb.fqs(p, fq, &swapped_msg); diff --git a/drivers/bus/ifpga/bus_ifpga_driver.h b/drivers/bus/ifpga/bus_ifpga_driver.h index 7b75c2ddbc4..5bbe36d6e0a 100644 --- a/drivers/bus/ifpga/bus_ifpga_driver.h +++ b/drivers/bus/ifpga/bus_ifpga_driver.h @@ -16,6 +16,7 @@ extern "C" { #endif /* __cplusplus */ #include +#include #include #include #include diff --git a/drivers/bus/pci/linux/pci_init.h b/drivers/bus/pci/linux/pci_init.h index d842809ccd7..a4d37c0d0a8 100644 --- a/drivers/bus/pci/linux/pci_init.h +++ b/drivers/bus/pci/linux/pci_init.h @@ -52,24 +52,6 @@ int pci_uio_ioport_unmap(struct rte_pci_ioport *p); #ifdef VFIO_PRESENT -#ifdef PCI_MSIX_TABLE_BIR -#define RTE_PCI_MSIX_TABLE_BIR PCI_MSIX_TABLE_BIR -#else -#define RTE_PCI_MSIX_TABLE_BIR 0x7 -#endif - -#ifdef PCI_MSIX_TABLE_OFFSET -#define RTE_PCI_MSIX_TABLE_OFFSET PCI_MSIX_TABLE_OFFSET -#else -#define RTE_PCI_MSIX_TABLE_OFFSET 0xfffffff8 -#endif - -#ifdef PCI_MSIX_FLAGS_QSIZE -#define RTE_PCI_MSIX_FLAGS_QSIZE PCI_MSIX_FLAGS_QSIZE -#else -#define RTE_PCI_MSIX_FLAGS_QSIZE 0x07ff -#endif - /* access config space */ int pci_vfio_read_config(const struct rte_pci_device *dev, void *buf, size_t len, off_t offs); diff --git a/drivers/bus/pci/linux/pci_uio.c b/drivers/bus/pci/linux/pci_uio.c index 2bf16e9369c..97d740dfe53 100644 --- a/drivers/bus/pci/linux/pci_uio.c +++ b/drivers/bus/pci/linux/pci_uio.c @@ -10,7 +10,6 @@ #include #include #include -#include #if defined(RTE_ARCH_X86) #include @@ -77,35 +76,6 @@ pci_uio_mmio_write(const struct rte_pci_device *dev, int bar, return len; } -static int -pci_uio_set_bus_master(int dev_fd) -{ - uint16_t reg; - int ret; - - ret = pread(dev_fd, &reg, sizeof(reg), PCI_COMMAND); - if (ret != sizeof(reg)) { - RTE_LOG(ERR, EAL, - "Cannot read command from PCI config space!\n"); - return -1; - } - - /* return if bus mastering is already on */ - if (reg & PCI_COMMAND_MASTER) - return 0; - - reg |= PCI_COMMAND_MASTER; - - ret = pwrite(dev_fd, &reg, sizeof(reg), PCI_COMMAND); - if (ret != sizeof(reg)) { - RTE_LOG(ERR, EAL, - "Cannot write command to PCI config space!\n"); - return -1; - } - - return 0; -} - static int pci_mknod_uio_dev(const char *sysfs_uio_path, unsigned uio_num) { @@ -299,7 +269,7 @@ pci_uio_alloc_resource(struct rte_pci_device *dev, goto error; /* set bus master that is not done by uio_pci_generic */ - if (pci_uio_set_bus_master(uio_cfg_fd)) { + if (rte_pci_set_bus_master(dev, true)) { RTE_LOG(ERR, EAL, "Cannot set up bus mastering!\n"); goto error; } diff --git a/drivers/bus/pci/linux/pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c index e634de8322e..3f3201daf2f 100644 --- a/drivers/bus/pci/linux/pci_vfio.c +++ b/drivers/bus/pci/linux/pci_vfio.c @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include @@ -107,84 +106,38 @@ pci_vfio_write_config(const struct rte_pci_device *dev, /* get PCI BAR number where MSI-X interrupts are */ static int -pci_vfio_get_msix_bar(const struct rte_pci_device *dev, 
int fd, +pci_vfio_get_msix_bar(const struct rte_pci_device *dev, struct pci_msix_table *msix_table) { - int ret; - uint32_t reg; - uint16_t flags; - uint8_t cap_id, cap_offset; - uint64_t size, offset; - - if (pci_vfio_get_region(dev, VFIO_PCI_CONFIG_REGION_INDEX, - &size, &offset) != 0) { - RTE_LOG(ERR, EAL, "Cannot get offset of CONFIG region.\n"); - return -1; - } + off_t cap_offset; - /* read PCI capability pointer from config space */ - ret = pread64(fd, &reg, sizeof(reg), offset + PCI_CAPABILITY_LIST); - if (ret != sizeof(reg)) { - RTE_LOG(ERR, EAL, - "Cannot read capability pointer from PCI config space!\n"); + cap_offset = rte_pci_find_capability(dev, RTE_PCI_CAP_ID_MSIX); + if (cap_offset < 0) return -1; - } - /* we need first byte */ - cap_offset = reg & 0xFF; + if (cap_offset != 0) { + uint16_t flags; + uint32_t reg; - while (cap_offset) { - - /* read PCI capability ID */ - ret = pread64(fd, &reg, sizeof(reg), offset + cap_offset); - if (ret != sizeof(reg)) { + if (rte_pci_read_config(dev, &reg, sizeof(reg), cap_offset + + RTE_PCI_MSIX_TABLE) < 0) { RTE_LOG(ERR, EAL, - "Cannot read capability ID from PCI config space!\n"); + "Cannot read MSIX table from PCI config space!\n"); return -1; } - /* we need first byte */ - cap_id = reg & 0xFF; - - /* if we haven't reached MSI-X, check next capability */ - if (cap_id != PCI_CAP_ID_MSIX) { - ret = pread64(fd, &reg, sizeof(reg), offset + cap_offset); - if (ret != sizeof(reg)) { - RTE_LOG(ERR, EAL, - "Cannot read capability pointer from PCI config space!\n"); - return -1; - } - - /* we need second byte */ - cap_offset = (reg & 0xFF00) >> 8; - - continue; + if (rte_pci_read_config(dev, &flags, sizeof(flags), cap_offset + + RTE_PCI_MSIX_FLAGS) < 0) { + RTE_LOG(ERR, EAL, + "Cannot read MSIX flags from PCI config space!\n"); + return -1; } - /* else, read table offset */ - else { - /* table offset resides in the next 4 bytes */ - ret = pread64(fd, &reg, sizeof(reg), offset + cap_offset + 4); - if (ret != sizeof(reg)) { - RTE_LOG(ERR, EAL, - "Cannot read table offset from PCI config space!\n"); - return -1; - } - ret = pread64(fd, &flags, sizeof(flags), offset + cap_offset + 2); - if (ret != sizeof(flags)) { - RTE_LOG(ERR, EAL, - "Cannot read table flags from PCI config space!\n"); - return -1; - } - - msix_table->bar_index = reg & RTE_PCI_MSIX_TABLE_BIR; - msix_table->offset = reg & RTE_PCI_MSIX_TABLE_OFFSET; - msix_table->size = - 16 * (1 + (flags & RTE_PCI_MSIX_FLAGS_QSIZE)); - - return 0; - } + msix_table->bar_index = reg & RTE_PCI_MSIX_TABLE_BIR; + msix_table->offset = reg & RTE_PCI_MSIX_TABLE_OFFSET; + msix_table->size = 16 * (1 + (flags & RTE_PCI_MSIX_FLAGS_QSIZE)); } + return 0; } @@ -202,18 +155,18 @@ pci_vfio_enable_bus_memory(struct rte_pci_device *dev, int dev_fd) return -1; } - ret = pread64(dev_fd, &cmd, sizeof(cmd), offset + PCI_COMMAND); + ret = pread64(dev_fd, &cmd, sizeof(cmd), offset + RTE_PCI_COMMAND); if (ret != sizeof(cmd)) { RTE_LOG(ERR, EAL, "Cannot read command from PCI config space!\n"); return -1; } - if (cmd & PCI_COMMAND_MEMORY) + if (cmd & RTE_PCI_COMMAND_MEMORY) return 0; - cmd |= PCI_COMMAND_MEMORY; - ret = pwrite64(dev_fd, &cmd, sizeof(cmd), offset + PCI_COMMAND); + cmd |= RTE_PCI_COMMAND_MEMORY; + ret = pwrite64(dev_fd, &cmd, sizeof(cmd), offset + RTE_PCI_COMMAND); if (ret != sizeof(cmd)) { RTE_LOG(ERR, EAL, "Cannot write command to PCI config space!\n"); @@ -223,42 +176,6 @@ pci_vfio_enable_bus_memory(struct rte_pci_device *dev, int dev_fd) return 0; } -/* set PCI bus mastering */ -static int -pci_vfio_set_bus_master(const 
struct rte_pci_device *dev, int dev_fd, bool op) -{ - uint64_t size, offset; - uint16_t reg; - int ret; - - if (pci_vfio_get_region(dev, VFIO_PCI_CONFIG_REGION_INDEX, - &size, &offset) != 0) { - RTE_LOG(ERR, EAL, "Cannot get offset of CONFIG region.\n"); - return -1; - } - - ret = pread64(dev_fd, &reg, sizeof(reg), offset + PCI_COMMAND); - if (ret != sizeof(reg)) { - RTE_LOG(ERR, EAL, "Cannot read command from PCI config space!\n"); - return -1; - } - - if (op) - /* set the master bit */ - reg |= PCI_COMMAND_MASTER; - else - reg &= ~(PCI_COMMAND_MASTER); - - ret = pwrite64(dev_fd, &reg, sizeof(reg), offset + PCI_COMMAND); - - if (ret != sizeof(reg)) { - RTE_LOG(ERR, EAL, "Cannot write command to PCI config space!\n"); - return -1; - } - - return 0; -} - /* set up interrupt support (but not enable interrupts) */ static int pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd) @@ -342,9 +259,6 @@ pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd) if (rte_intr_fd_set(dev->intr_handle, fd)) return -1; - if (rte_intr_dev_fd_set(dev->intr_handle, vfio_dev_fd)) - return -1; - switch (i) { case VFIO_PCI_MSIX_IRQ_INDEX: intr_mode = RTE_INTR_MODE_MSIX; @@ -512,14 +426,14 @@ pci_vfio_is_ioport_bar(const struct rte_pci_device *dev, int vfio_dev_fd, } ret = pread64(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar), - offset + PCI_BASE_ADDRESS_0 + bar_index * 4); + offset + RTE_PCI_BASE_ADDRESS_0 + bar_index * 4); if (ret != sizeof(ioport_bar)) { RTE_LOG(ERR, EAL, "Cannot read command (%x) from config space!\n", - PCI_BASE_ADDRESS_0 + bar_index*4); + RTE_PCI_BASE_ADDRESS_0 + bar_index*4); return -1; } - return (ioport_bar & PCI_BASE_ADDRESS_SPACE_IO) != 0; + return (ioport_bar & RTE_PCI_BASE_ADDRESS_SPACE_IO) != 0; } static int @@ -535,8 +449,7 @@ pci_rte_vfio_setup_device(struct rte_pci_device *dev, int vfio_dev_fd) return -1; } - /* set bus mastering for the device */ - if (pci_vfio_set_bus_master(dev, vfio_dev_fd, true)) { + if (rte_pci_set_bus_master(dev, true)) { RTE_LOG(ERR, EAL, "Cannot set up bus mastering!\n"); return -1; } @@ -875,6 +788,9 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev) if (ret) return ret; + if (rte_intr_dev_fd_set(dev->intr_handle, vfio_dev_fd)) + goto err_vfio_dev_fd; + /* allocate vfio_res and get region info */ vfio_res = rte_zmalloc("VFIO_RES", sizeof(*vfio_res), 0); if (vfio_res == NULL) { @@ -906,7 +822,7 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev) /* get MSI-X BAR, if any (we have to know where it is because we can't * easily mmap it when using VFIO) */ - ret = pci_vfio_get_msix_bar(dev, vfio_dev_fd, &vfio_res->msix_table); + ret = pci_vfio_get_msix_bar(dev, &vfio_res->msix_table); if (ret < 0) { RTE_LOG(ERR, EAL, "%s cannot get MSI-X BAR number!\n", pci_addr); @@ -1226,7 +1142,7 @@ pci_vfio_unmap_resource_primary(struct rte_pci_device *dev) if (vfio_dev_fd < 0) return -1; - if (pci_vfio_set_bus_master(dev, vfio_dev_fd, false)) { + if (rte_pci_set_bus_master(dev, false)) { RTE_LOG(ERR, EAL, "%s cannot unset bus mastering for PCI device!\n", pci_addr); return -1; diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c index 52404ab0fe5..921d957bf60 100644 --- a/drivers/bus/pci/pci_common.c +++ b/drivers/bus/pci/pci_common.c @@ -304,7 +304,7 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr, } } - RTE_LOG(INFO, EAL, "Probe PCI driver: %s (%x:%x) device: "PCI_PRI_FMT" (socket %i)\n", + RTE_LOG(INFO, EAL, "Probe PCI driver: %s (%x:%04x) device: "PCI_PRI_FMT" (socket %i)\n", dr->driver.name, 
dev->id.vendor_id, dev->id.device_id, loc->domain, loc->bus, loc->devid, loc->function, dev->device.numa_node); @@ -813,8 +813,62 @@ rte_pci_get_iommu_class(void) return iova_mode; } +bool +rte_pci_has_capability_list(const struct rte_pci_device *dev) +{ + uint16_t status; + + if (rte_pci_read_config(dev, &status, sizeof(status), RTE_PCI_STATUS) != sizeof(status)) + return false; + + return (status & RTE_PCI_STATUS_CAP_LIST) != 0; +} + +off_t +rte_pci_find_capability(const struct rte_pci_device *dev, uint8_t cap) +{ + return rte_pci_find_next_capability(dev, cap, 0); +} + +off_t +rte_pci_find_next_capability(const struct rte_pci_device *dev, uint8_t cap, + off_t offset) +{ + uint8_t pos; + int ttl; + + if (offset == 0) + offset = RTE_PCI_CAPABILITY_LIST; + else + offset += RTE_PCI_CAP_NEXT; + ttl = (RTE_PCI_CFG_SPACE_SIZE - RTE_PCI_STD_HEADER_SIZEOF) / RTE_PCI_CAP_SIZEOF; + + if (rte_pci_read_config(dev, &pos, sizeof(pos), offset) < 0) + return -1; + + while (pos && ttl--) { + uint16_t ent; + uint8_t id; + + offset = pos; + if (rte_pci_read_config(dev, &ent, sizeof(ent), offset) < 0) + return -1; + + id = ent & 0xff; + if (id == 0xff) + break; + + if (id == cap) + return offset; + + pos = (ent >> 8); + } + + return 0; +} + off_t -rte_pci_find_ext_capability(struct rte_pci_device *dev, uint32_t cap) +rte_pci_find_ext_capability(const struct rte_pci_device *dev, uint32_t cap) { off_t offset = RTE_PCI_CFG_SPACE_SIZE; uint32_t header; @@ -857,7 +911,7 @@ rte_pci_find_ext_capability(struct rte_pci_device *dev, uint32_t cap) } int -rte_pci_set_bus_master(struct rte_pci_device *dev, bool enable) +rte_pci_set_bus_master(const struct rte_pci_device *dev, bool enable) { uint16_t old_cmd, cmd; diff --git a/drivers/bus/pci/rte_bus_pci.h b/drivers/bus/pci/rte_bus_pci.h index 9d59c4aef3c..21e234abf0c 100644 --- a/drivers/bus/pci/rte_bus_pci.h +++ b/drivers/bus/pci/rte_bus_pci.h @@ -68,6 +68,60 @@ void rte_pci_unmap_device(struct rte_pci_device *dev); */ void rte_pci_dump(FILE *f); +/** + * Check whether this device has a PCI capability list. + * + * @param dev + * A pointer to rte_pci_device structure. + * + * @return + * true/false + */ +__rte_experimental +bool rte_pci_has_capability_list(const struct rte_pci_device *dev); + +/** + * Find device's PCI capability. + * + * @param dev + * A pointer to rte_pci_device structure. + * + * @param cap + * Capability to be found, which can be any from + * RTE_PCI_CAP_ID_*, defined in librte_pci. + * + * @return + * > 0: The offset of the next matching capability structure + * within the device's PCI configuration space. + * < 0: An error in PCI config space read. + * = 0: Device does not support it. + */ +__rte_experimental +off_t rte_pci_find_capability(const struct rte_pci_device *dev, uint8_t cap); + +/** + * Find device's PCI capability starting from a previous offset in PCI + * configuration space. + * + * @param dev + * A pointer to rte_pci_device structure. + * + * @param cap + * Capability to be found, which can be any from + * RTE_PCI_CAP_ID_*, defined in librte_pci. + * @param offset + * An offset in the PCI configuration space from which the capability is + * looked for. + * + * @return + * > 0: The offset of the next matching capability structure + * within the device's PCI configuration space. + * < 0: An error in PCI config space read. + * = 0: Device does not support it. + */ +__rte_experimental +off_t rte_pci_find_next_capability(const struct rte_pci_device *dev, uint8_t cap, off_t offset); + /** * Find device's extended PCI capability. 
* @@ -85,7 +139,7 @@ void rte_pci_dump(FILE *f); * = 0: Device does not support it. */ __rte_experimental -off_t rte_pci_find_ext_capability(struct rte_pci_device *dev, uint32_t cap); +off_t rte_pci_find_ext_capability(const struct rte_pci_device *dev, uint32_t cap); /** * Enables/Disables Bus Master for device's PCI command register. @@ -99,7 +153,7 @@ off_t rte_pci_find_ext_capability(struct rte_pci_device *dev, uint32_t cap); * 0 on success, -1 on error in PCI config space read/write. */ __rte_experimental -int rte_pci_set_bus_master(struct rte_pci_device *dev, bool enable); +int rte_pci_set_bus_master(const struct rte_pci_device *dev, bool enable); /** * Read PCI config space. diff --git a/drivers/bus/pci/version.map b/drivers/bus/pci/version.map index a0000f7938b..74c5b075d5c 100644 --- a/drivers/bus/pci/version.map +++ b/drivers/bus/pci/version.map @@ -25,6 +25,11 @@ EXPERIMENTAL { # added in 23.07 rte_pci_mmio_read; rte_pci_mmio_write; + + # added in 23.11 + rte_pci_find_capability; + rte_pci_find_next_capability; + rte_pci_has_capability_list; }; INTERNAL { diff --git a/drivers/bus/platform/platform.c b/drivers/bus/platform/platform.c index 9b6ed2832e9..7e24bb96a1f 100644 --- a/drivers/bus/platform/platform.c +++ b/drivers/bus/platform/platform.c @@ -617,8 +617,8 @@ platform_bus_cleanup(void) struct rte_platform_device *pdev, *tmp; RTE_TAILQ_FOREACH_SAFE(pdev, &platform_bus.device_list, next, tmp) { - platform_bus_unplug(&pdev->device); TAILQ_REMOVE(&platform_bus.device_list, pdev, next); + platform_bus_unplug(&pdev->device); } return 0; diff --git a/drivers/bus/vmbus/vmbus_common.c b/drivers/bus/vmbus/vmbus_common.c index 95f3ad78bc8..b9139c6e6c6 100644 --- a/drivers/bus/vmbus/vmbus_common.c +++ b/drivers/bus/vmbus/vmbus_common.c @@ -39,6 +39,9 @@ vmbus_map_resource(void *requested_addr, int fd, off_t offset, size_t size, "mmap(%d, %p, %zu, %ld) failed: %s", fd, requested_addr, size, (long)offset, strerror(errno)); + } else { + VMBUS_LOG(DEBUG, " VMBUS memory mapped at %p", + mapaddr); } return mapaddr; } @@ -55,9 +58,10 @@ vmbus_unmap_resource(void *requested_addr, size_t size) VMBUS_LOG(ERR, "munmap(%p, 0x%lx) failed: %s", requested_addr, (unsigned long)size, strerror(errno)); - } else + } else { VMBUS_LOG(DEBUG, " VMBUS memory unmapped at %p", requested_addr); + } } /** @@ -101,7 +105,12 @@ vmbus_probe_one_driver(struct rte_vmbus_driver *dr, VMBUS_LOG(INFO, "VMBUS device %s on NUMA socket %i", guid, dev->device.numa_node); - /* TODO add block/allow logic */ + /* no initialization when marked as blocked, return without error */ + if (dev->device.devargs != NULL && + dev->device.devargs->policy == RTE_DEV_BLOCKED) { + VMBUS_LOG(INFO, " Device is blocked, not initializing\n"); + return 1; + } /* map resources for device */ ret = rte_vmbus_map_device(dev); @@ -158,6 +167,25 @@ vmbus_probe_all_drivers(struct rte_vmbus_device *dev) return 1; } +static bool +vmbus_ignore_device(struct rte_vmbus_device *dev) +{ + struct rte_devargs *devargs = vmbus_devargs_lookup(dev); + + switch (rte_vmbus_bus.bus.conf.scan_mode) { + case RTE_BUS_SCAN_ALLOWLIST: + if (devargs && devargs->policy == RTE_DEV_ALLOWED) + return false; + break; + case RTE_BUS_SCAN_UNDEFINED: + case RTE_BUS_SCAN_BLOCKLIST: + if (devargs == NULL || devargs->policy != RTE_DEV_BLOCKED) + return false; + break; + } + return true; +} + /* * Scan the vmbus, and call the devinit() function for * all registered drivers that have a matching entry in its id_table @@ -175,7 +203,8 @@ rte_vmbus_probe(void) 
rte_uuid_unparse(dev->device_id, ubuf, sizeof(ubuf)); - /* TODO: add allowlist/blocklist */ + if (vmbus_ignore_device(dev)) + continue; if (vmbus_probe_all_drivers(dev) < 0) { VMBUS_LOG(NOTICE, diff --git a/drivers/common/cnxk/cnxk_security_ar.h b/drivers/common/cnxk/cnxk_security_ar.h index deb38db0d03..d0151a752c0 100644 --- a/drivers/common/cnxk/cnxk_security_ar.h +++ b/drivers/common/cnxk/cnxk_security_ar.h @@ -17,7 +17,7 @@ BITS_PER_LONG_LONG) #define WORD_SHIFT 6 -#define WORD_SIZE (1 << WORD_SHIFT) +#define WORD_SIZE (1ULL << WORD_SHIFT) #define WORD_MASK (WORD_SIZE - 1) #define IPSEC_ANTI_REPLAY_FAILED (-1) diff --git a/drivers/common/cnxk/hw/cpt.h b/drivers/common/cnxk/hw/cpt.h index 5e1519e2023..cf9046bbfb4 100644 --- a/drivers/common/cnxk/hw/cpt.h +++ b/drivers/common/cnxk/hw/cpt.h @@ -76,10 +76,11 @@ union cpt_eng_caps { uint64_t __io reserved_15_20 : 6; uint64_t __io sm3 : 1; uint64_t __io sm4 : 1; - uint64_t __io reserved_23_33 : 11; - uint64_t __io pdcp_chain : 1; + uint64_t __io reserved_23_34 : 12; uint64_t __io sg_ver2 : 1; - uint64_t __io reserved_36_63 : 28; + uint64_t __io sm2 : 1; + uint64_t __io pdcp_chain_zuc256 : 1; + uint64_t __io reserved_38_63 : 26; }; }; diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h index 319fe36e047..1720eb38157 100644 --- a/drivers/common/cnxk/hw/nix.h +++ b/drivers/common/cnxk/hw/nix.h @@ -619,6 +619,7 @@ #define NIX_RX_ACTIONOP_RSS (0x4ull) #define NIX_RX_ACTIONOP_PF_FUNC_DROP (0x5ull) #define NIX_RX_ACTIONOP_MIRROR (0x6ull) +#define NIX_RX_ACTIONOP_DEFAULT (0xfull) #define NIX_RX_VTAGACTION_VTAG0_RELPTR (0x0ull) #define NIX_RX_VTAGACTION_VTAG1_RELPTR (0x4ull) diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build index 79e10bac74d..56eea529090 100644 --- a/drivers/common/cnxk/meson.build +++ b/drivers/common/cnxk/meson.build @@ -8,7 +8,6 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64') subdir_done() endif -config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON' deps = ['eal', 'pci', 'bus_pci', 'mbuf', 'security'] sources = files( 'roc_ae.c', @@ -57,6 +56,7 @@ sources = files( 'roc_npa_irq.c', 'roc_npa_type.c', 'roc_npc.c', + 'roc_npc_aging.c', 'roc_npc_mcam.c', 'roc_npc_mcam_dump.c', 'roc_npc_parse.c', diff --git a/drivers/common/cnxk/roc_ae.c b/drivers/common/cnxk/roc_ae.c index 336b9276419..e6a013d7c48 100644 --- a/drivers/common/cnxk/roc_ae.c +++ b/drivers/common/cnxk/roc_ae.c @@ -149,7 +149,37 @@ const struct roc_ae_ec_group ae_ec_grp[ROC_AE_EC_ID_PMAX] = { 0xBF, 0x07, 0x35, 0x73, 0xDF, 0x88, 0x3D, 0x2C, 0x34, 0xF1, 0xEF, 0x45, 0x1F, 0xD4, 0x6B, 0x50, 0x3F, 0x00}, - .length = 66}}}; + .length = 66}, + }, + {}, + {}, + {}, + { + .prime = {.data = {0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF}, + .length = 32}, + .order = {.data = {0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x72, 0x03, 0xDF, 0x6B, 0x21, + 0xC6, 0x05, 0x2B, 0x53, 0xBB, 0xF4, 0x09, + 0x39, 0xD5, 0x41, 0x23}, + .length = 32}, + .consta = {.data = {0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFC}, + .length = 32}, + .constb = {.data = {0x28, 0xE9, 0xFA, 0x9E, 0x9D, 0x9F, 0x5E, + 0x34, 0x4D, 0x5A, 0x9E, 0x4B, 0xCF, 0x65, + 0x09, 0xA7, 0xF3, 0x97, 0x89, 0xF5, 0x15, + 0xAB, 0x8F, 
0x92, 0xDD, 0xBC, 0xBD, 0x41, + 0x4D, 0x94, 0x0E, 0x93}, + .length = 32}, + }}; int roc_ae_ec_grp_get(struct roc_ae_ec_group **tbl) diff --git a/drivers/common/cnxk/roc_ae.h b/drivers/common/cnxk/roc_ae.h index c972878effd..d459c5e6804 100644 --- a/drivers/common/cnxk/roc_ae.h +++ b/drivers/common/cnxk/roc_ae.h @@ -34,7 +34,8 @@ typedef enum { ROC_AE_EC_ID_P160 = 5, ROC_AE_EC_ID_P320 = 6, ROC_AE_EC_ID_P512 = 7, - ROC_AE_EC_ID_PMAX = 8 + ROC_AE_EC_ID_SM2 = 8, + ROC_AE_EC_ID_PMAX } roc_ae_ec_id; /* Prime and order fields of built-in elliptic curves */ @@ -67,6 +68,24 @@ struct roc_ae_ec_group { struct roc_ae_ec_ctx { /* Prime length defined by microcode for EC operations */ uint8_t curveid; + + /* Private key */ + struct { + uint8_t data[66]; + unsigned int length; + } pkey; + + /* Public key */ + struct { + struct { + uint8_t data[66]; + unsigned int length; + } x; + struct { + uint8_t data[66]; + unsigned int length; + } y; + } q; }; /* Buffer pointer */ diff --git a/drivers/common/cnxk/roc_ae_fpm_tables.c b/drivers/common/cnxk/roc_ae_fpm_tables.c index f91570299b9..ead3128e7f0 100644 --- a/drivers/common/cnxk/roc_ae_fpm_tables.c +++ b/drivers/common/cnxk/roc_ae_fpm_tables.c @@ -1057,6 +1057,189 @@ const uint8_t ae_fpm_tbl_p521[AE_FPM_P521_LEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }; +const uint8_t ae_fpm_tbl_p256_sm2[AE_FPM_P256_LEN] = { + 0x71, 0x5A, 0x45, 0x89, 0x33, 0x4C, 0x74, 0xC7, 0x8F, 0xE3, 0x0B, 0xBF, + 0xF2, 0x66, 0x0B, 0xE1, 0x5F, 0x99, 0x04, 0x46, 0x6A, 0x39, 0xC9, 0x94, + 0x32, 0xC4, 0xAE, 0x2C, 0x1F, 0x19, 0x81, 0x19, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0xDF, 0x32, 0xE5, 0x21, 0x39, 0xF0, 0xA0, 0xD0, 0xA9, 0x87, 0x7C, + 0xC6, 0x2A, 0x47, 0x40, 0x59, 0xBD, 0xCE, 0xE3, 0x6B, 0x69, 0x21, 0x53, + 0xBC, 0x37, 0x36, 0xA2, 0xF4, 0xF6, 0x77, 0x9C, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xE1, 0x8B, 0xD5, 0x46, 0xB5, 0x82, 0x45, 0x17, 0x67, 0x38, 0x91, 0xD7, + 0x91, 0xCA, 0xA4, 0x86, 0xBA, 0x22, 0x0B, 0x99, 0xDF, 0x9F, 0x9A, 0x14, + 0x95, 0xAF, 0xBD, 0x11, 0x55, 0xC1, 0xDA, 0x54, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x8E, 0x44, 0x50, 0xEB, 0x33, 0x4A, 0xCD, 0xCB, 0xC3, 0xC7, 0xD1, 0x89, + 0x8A, 0x53, 0xF2, 0x0D, 0x2E, 0xEE, 0x75, 0x0F, 0x40, 0x53, 0x01, 0x7C, + 0xE8, 0xA6, 0xD8, 0x2C, 0x51, 0x73, 0x88, 0xC2, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xF8, 0x1C, 0x8D, 0xA9, 0xB9, 0x9F, 0xBA, 0x55, 0x13, 0x7F, 0x6C, 0x61, + 0x49, 0xFE, 0xEF, 0x6E, 0xCB, 0x12, 0x9A, 0xA4, 0x94, 0xDA, 0x9A, 0xD4, + 0x82, 0xA0, 0xF5, 0x40, 0x7D, 0x12, 0x3D, 0xB6, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFD, 0xEC, 0xA0, 0x07, 0x72, 0xC4, 0xDB, 0xC9, 0xA9, 0x61, 0xB5, 0x8F, + 0x0C, 0xF5, 0x83, 0x73, 0xEC, 
0xAC, 0xAB, 0x94, 0xE9, 0x73, 0xF9, 0xC3, + 0xF1, 0x2F, 0xA4, 0x69, 0x6A, 0x22, 0xCA, 0x3F, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xEA, 0xE3, 0xD9, 0xA9, 0xD1, 0x3A, 0x42, 0xED, 0x2B, 0x23, 0x08, 0xF6, + 0x48, 0x4E, 0x1B, 0x38, 0x3D, 0xB7, 0xB2, 0x48, 0x88, 0xC2, 0x1F, 0x3A, + 0xB6, 0x92, 0xE5, 0xB5, 0x74, 0xD5, 0x5D, 0xA9, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xD1, 0x86, 0x46, 0x9D, 0xE2, 0x95, 0xE5, 0xAB, 0xDB, 0x61, 0xAC, 0x17, + 0x73, 0x43, 0x8E, 0x6D, 0x5A, 0x92, 0x4F, 0x85, 0x54, 0x49, 0x26, 0xF9, + 0xA1, 0x75, 0x05, 0x1B, 0x0F, 0x3F, 0xB6, 0x13, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xA7, 0x2D, 0x08, 0x4F, 0x62, 0xC8, 0xD5, 0x8B, 0xE3, 0xD6, 0x46, 0x7D, + 0xEA, 0xF4, 0x8F, 0xD7, 0x8F, 0xE7, 0x5E, 0x5A, 0x12, 0x8A, 0x56, 0xA7, + 0xC0, 0x02, 0x3F, 0xE7, 0xFF, 0x2B, 0x68, 0xBD, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x64, 0xF6, 0x77, 0x82, 0x31, 0x68, 0x15, 0xF9, 0xB5, 0x2B, 0x6D, 0x9B, + 0x19, 0xA6, 0x9C, 0xD2, 0x5D, 0x1E, 0xD6, 0xFA, 0x89, 0xCB, 0xBA, 0xDE, + 0x79, 0x6C, 0x91, 0x0E, 0xE7, 0xF4, 0xCC, 0xDB, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1B, 0x21, 0x50, 0xC1, 0xC5, 0xF1, 0x30, 0x15, 0xDA, 0xAB, 0xA9, 0x1B, + 0x5D, 0x95, 0x2C, 0x9B, 0x0E, 0x8C, 0xC2, 0x4C, 0x3F, 0x54, 0x61, 0x42, + 0x75, 0xA3, 0x4B, 0x24, 0x37, 0x05, 0xF2, 0x60, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x77, 0xD1, 0x95, 0x42, 0x1C, 0xEF, 0x13, 0x39, 0x63, 0x66, 0x44, 0xAA, + 0x0C, 0x3A, 0x06, 0x23, 0x46, 0x83, 0xDF, 0x17, 0x6E, 0xEB, 0x24, 0x44, + 0x64, 0x2C, 0xE3, 0xBD, 0x35, 0x35, 0xE7, 0x4D, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x4A, 0x59, 0xAC, 0x2C, 0x6E, 0x7E, 0xCC, 0x08, 0xAF, 0x2B, 0x71, 0x16, + 0x4F, 0x19, 0x1D, 0x63, 0x36, 0x22, 0xA8, 0x7F, 0xB2, 0x84, 0x55, 0x4F, + 0xD9, 0xEB, 0x39, 0x7B, 0x44, 0x1E, 0x9C, 0xD0, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xA6, 0x6B, 0x8A, 0x48, 0x93, 0xB6, 0xA5, 0x4D, 0x26, 0xFB, 0x89, 0xA4, + 0x0B, 0x4A, 0x66, 0x3A, 0xAF, 
0xA8, 0x75, 0x01, 0xEE, 0xDF, 0xC9, 0xF4, + 0xF3, 0xF0, 0x00, 0xBC, 0x66, 0xF9, 0x81, 0x08, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xAD, 0x8B, 0xC6, 0x8C, 0xE0, 0x31, 0xD6, 0x16, 0x16, 0x88, 0x8D, 0x8E, + 0xE4, 0x00, 0x31, 0x87, 0x44, 0xC0, 0x75, 0x7F, 0x3B, 0xB8, 0xB6, 0x00, + 0x79, 0x3F, 0xAE, 0x7A, 0xF0, 0x16, 0x42, 0x45, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x21, 0x0C, 0xD0, 0x42, 0x97, 0x3F, 0x33, 0x3B, 0x08, 0x66, 0x6F, 0xF5, + 0x2D, 0xBD, 0x25, 0xF9, 0x65, 0xC5, 0xB1, 0x29, 0xF5, 0xF7, 0xAD, 0x5D, + 0xE0, 0x3D, 0x7A, 0x8D, 0x19, 0xB3, 0x21, 0x9A, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xD6, 0x8B, 0xFB, 0xAC, 0xE0, 0xE0, 0x03, 0x92, 0x26, 0x10, 0x14, 0xF7, + 0xD3, 0x44, 0x5D, 0xC7, 0xD9, 0xF4, 0x6B, 0x27, 0x14, 0xA0, 0x71, 0xEE, + 0x1B, 0x20, 0x0A, 0xF3, 0x08, 0x10, 0xB6, 0x82, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x0D, 0x91, 0xD8, 0xB1, 0x2A, 0xE6, 0x9B, 0xCD, 0x74, 0xA0, 0x8F, 0x17, + 0xBF, 0x8C, 0xD9, 0x81, 0xD8, 0x22, 0x91, 0x3C, 0xF0, 0xD2, 0xB8, 0x2D, + 0x24, 0x8B, 0x7A, 0xF0, 0xB0, 0x5B, 0xFA, 0xD2, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xBA, 0x11, 0x9A, 0x04, 0x9E, 0x62, 0xF2, 0xE2, 0xF2, 0x78, 0xE8, 0xA3, + 0x4D, 0xF0, 0x5A, 0xE5, 0xD2, 0x69, 0xF3, 0x56, 0x4E, 0xB5, 0xD1, 0x80, + 0x8E, 0x74, 0xAD, 0x0F, 0x4F, 0x95, 0x7C, 0xB1, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x11, 0x2F, 0xF4, 0xDA, 0xBD, 0x76, 0xE2, 0xDD, 0x91, 0x37, 0x3F, 0x20, + 0x63, 0x0F, 0xDB, 0x7F, 0xF4, 0x3E, 0xAB, 0x47, 0x49, 0x92, 0x90, 0x4C, + 0x55, 0xA5, 0xCC, 0xC7, 0xAF, 0x3B, 0x6D, 0xB4, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5A, 0xD1, 0x04, 0xA8, 0xBD, 0xD2, 0x3D, 0xE9, 0xF5, 0xA9, 0xE5, 0x15, + 0xEB, 0x71, 0xC2, 0xC1, 0x39, 0x05, 0x42, 0xA0, 0xBA, 0x95, 0xC1, 0x74, + 0x4C, 0x55, 0xFB, 0x20, 0x42, 0x64, 0x91, 0xBF, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x91, 0x52, 0x57, 0x35, 0xEF, 0x62, 0x62, 0x89, 0xD2, 0xED, 0x97, 0x7F, + 0x88, 0xF0, 0x96, 0x35, 0xFD, 
0x48, 0x73, 0x1B, 0x7A, 0x8A, 0x85, 0x21, + 0x08, 0xF8, 0x9A, 0x03, 0xB8, 0xFD, 0xEB, 0xEA, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x7E, 0x8E, 0x61, 0xEA, 0x35, 0xEB, 0x8E, 0x2E, 0x1B, 0xB2, 0x70, 0x0D, + 0xB9, 0x8A, 0x76, 0x2C, 0xD8, 0x1E, 0xA2, 0x3B, 0x77, 0x38, 0xC1, 0x7C, + 0xF9, 0xDE, 0xF2, 0xA4, 0x6D, 0xBA, 0x26, 0xA3, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x18, 0x3A, 0x79, 0x12, 0xD0, 0x5E, 0x32, 0x9F, 0x34, 0x66, 0x4A, 0x08, + 0x96, 0xCC, 0xDE, 0x0E, 0x56, 0xC2, 0x26, 0x52, 0x61, 0x42, 0x83, 0xBB, + 0x91, 0x69, 0x28, 0x99, 0xD5, 0xFF, 0x05, 0x13, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x44, 0x9D, 0x48, 0xD8, 0xF3, 0xBD, 0xBE, 0x19, 0xAB, 0x95, 0xDE, 0x03, + 0xCC, 0x85, 0x10, 0xCB, 0xAE, 0xF1, 0x59, 0x46, 0x3F, 0x8B, 0xFB, 0x25, + 0xDA, 0x72, 0xC3, 0x79, 0xDA, 0xE3, 0xCA, 0x8B, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xCB, 0xA9, 0x31, 0x5C, 0xE8, 0x2C, 0xC3, 0xEA, 0x4E, 0x52, 0x4B, 0xAC, + 0x38, 0xA5, 0x80, 0x20, 0x36, 0xBA, 0x27, 0x52, 0x53, 0x8E, 0x34, 0x8C, + 0xB1, 0x70, 0xD0, 0xDA, 0x75, 0xED, 0x45, 0x0F, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x94, 0x7A, 0xF0, 0xF5, 0x2B, 0x4F, 0x8D, 0xA6, 0x7E, 0xDA, 0x17, 0xD9, + 0x17, 0x82, 0x79, 0x76, 0x5B, 0xA7, 0x9A, 0x0C, 0x70, 0x58, 0x53, 0xA0, + 0xA5, 0xD9, 0x87, 0x3B, 0x3F, 0xB2, 0xDD, 0xC7, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xC2, 0xA4, 0x81, 0x62, 0xA5, 0xFD, 0x9C, 0xE9, 0x80, 0xEE, 0x8A, 0xE5, + 0x26, 0xF2, 0x5F, 0x02, 0xF6, 0x0C, 0x8E, 0xF6, 0x63, 0x3B, 0xE6, 0xA9, + 0xE2, 0xE2, 0x3F, 0x02, 0x29, 0xA8, 0x4A, 0x35, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xBC, 0x49, 0x45, 0xBD, 0x86, 0xBB, 0x6A, 0xFB, 0x23, 0x7E, 0xB7, 0x11, + 0xEB, 0xA4, 0x6F, 0xEE, 0x7C, 0x1D, 0xB5, 0x8B, 0x7B, 0x86, 0xEB, 0x33, + 0xD9, 0x4E, 0xB7, 0x28, 0x27, 0x3B, 0x3A, 0xC7, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xBE, 0x17, 0x17, 0xE5, 0x95, 0x68, 0xD0, 0xA4, 0x4A, 0x60, 0x67, 0xCC, + 0x45, 0xF7, 0x02, 0x12, 0x19, 
0xB3, 0x2E, 0xB5, 0xAF, 0xC2, 0xFB, 0x17, + 0xBE, 0x3C, 0x1E, 0x7A, 0xC3, 0xAC, 0x9D, 0x3C, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + const struct ae_fpm_entry ae_fpm_tbl_scalar[ROC_AE_EC_ID_PMAX] = { { .data = ae_fpm_tbl_p192, @@ -1077,6 +1260,13 @@ const struct ae_fpm_entry ae_fpm_tbl_scalar[ROC_AE_EC_ID_PMAX] = { { .data = ae_fpm_tbl_p521, .len = sizeof(ae_fpm_tbl_p521) + }, + {}, + {}, + {}, + { + .data = ae_fpm_tbl_p256_sm2, + .len = sizeof(ae_fpm_tbl_p256_sm2) } }; diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c index d235ff51ca8..981e85a204a 100644 --- a/drivers/common/cnxk/roc_cpt.c +++ b/drivers/common/cnxk/roc_cpt.c @@ -331,6 +331,8 @@ roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, struct roc_cpt_inline_ipse req->param2 = cfg->param2; req->opcode = cfg->opcode; req->bpid = cfg->bpid; + req->ctx_ilen_valid = cfg->ctx_ilen_valid; + req->ctx_ilen = cfg->ctx_ilen; rc = mbox_process(mbox); exit: @@ -460,8 +462,8 @@ cpt_available_lfs_get(struct dev *dev, uint16_t *nb_lf) } int -cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr, - bool inl_dev_sso) +cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr, bool inl_dev_sso, + bool ctx_ilen_valid, uint8_t ctx_ilen) { struct cpt_lf_alloc_req_msg *req; struct mbox *mbox = mbox_get(dev->mbox); @@ -485,6 +487,8 @@ cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr, req->sso_pf_func = idev_sso_pffunc_get(); req->eng_grpmsk = eng_grpmsk; req->blkaddr = blkaddr; + req->ctx_ilen_valid = ctx_ilen_valid; + req->ctx_ilen = ctx_ilen; rc = mbox_process(mbox); exit: @@ -587,6 +591,8 @@ roc_cpt_dev_configure(struct roc_cpt *roc_cpt, int nb_lf) struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt); uint8_t blkaddr[ROC_CPT_MAX_BLKS]; struct msix_offset_rsp *rsp; + bool ctx_ilen_valid = false; + uint16_t ctx_ilen = 0; uint8_t eng_grpmsk; int blknum = 0; int rc, i; @@ -618,7 +624,13 @@ roc_cpt_dev_configure(struct roc_cpt *roc_cpt, int nb_lf) (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_SE]) | (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_IE]); - rc = cpt_lfs_alloc(&cpt->dev, eng_grpmsk, blkaddr[blknum], false); + if (roc_errata_cpt_has_ctx_fetch_issue()) { + ctx_ilen_valid = true; + /* Inbound SA size is max context size */ + ctx_ilen = (PLT_ALIGN(ROC_OT_IPSEC_SA_SZ_MAX, ROC_ALIGN) / 128) - 1; + } + + rc = cpt_lfs_alloc(&cpt->dev, eng_grpmsk, blkaddr[blknum], false, ctx_ilen_valid, ctx_ilen); if (rc) goto lfs_detach; @@ -1108,6 +1120,11 @@ roc_cpt_iq_enable(struct roc_cpt_lf *lf) lf_inprog.s.eena = 1; plt_write64(lf_inprog.u, lf->rbase + CPT_LF_INPROG); + if (roc_errata_cpt_has_ctx_fetch_issue()) { + /* Enable flush on FLR */ + plt_write64(1, lf->rbase + CPT_LF_CTX_CTL); + } + cpt_lf_dump(lf); } diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h index 910bd37a0cb..787bccb27d0 100644 --- a/drivers/common/cnxk/roc_cpt.h +++ b/drivers/common/cnxk/roc_cpt.h @@ -161,6 +161,8 @@ struct roc_cpt_inline_ipsec_inb_cfg { uint16_t bpid; uint32_t credit_th; uint8_t egrp; + uint8_t ctx_ilen_valid : 1; + uint8_t ctx_ilen : 7; }; int __roc_api roc_cpt_rxc_time_cfg(struct roc_cpt *roc_cpt, diff --git a/drivers/common/cnxk/roc_cpt_priv.h 
b/drivers/common/cnxk/roc_cpt_priv.h index 61dec9a1687..4ed87c857b4 100644 --- a/drivers/common/cnxk/roc_cpt_priv.h +++ b/drivers/common/cnxk/roc_cpt_priv.h @@ -21,8 +21,8 @@ roc_cpt_to_cpt_priv(struct roc_cpt *roc_cpt) int cpt_lfs_attach(struct dev *dev, uint8_t blkaddr, bool modify, uint16_t nb_lf); int cpt_lfs_detach(struct dev *dev); -int cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blk, - bool inl_dev_sso); +int cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blk, bool inl_dev_sso, + bool ctx_ilen_valid, uint8_t ctx_ilen); int cpt_lfs_free(struct dev *dev); int cpt_lf_init(struct roc_cpt_lf *lf); void cpt_lf_fini(struct roc_cpt_lf *lf); diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c index 4b0ba218ed9..e7e89bf3d6d 100644 --- a/drivers/common/cnxk/roc_dev.c +++ b/drivers/common/cnxk/roc_dev.c @@ -18,7 +18,7 @@ #define ROC_PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */ /* VF Mbox handler thread name */ -#define MBOX_HANDLER_NAME_MAX_LEN 25 +#define MBOX_HANDLER_NAME_MAX_LEN RTE_THREAD_INTERNAL_NAME_SIZE /* VF interrupt message pending bits - mbox or flr */ #define ROC_DEV_MBOX_PEND BIT_ULL(0) @@ -451,7 +451,6 @@ process_msgs(struct dev *dev, struct mbox *mbox) * while PFC already configured on other VFs. This is * not an error but a warning which can be ignored. */ -#define LMAC_AF_ERR_PERM_DENIED -1103 if (msg->rc) { if (msg->rc == LMAC_AF_ERR_PERM_DENIED) { plt_mbox_dbg( @@ -464,6 +463,18 @@ process_msgs(struct dev *dev, struct mbox *mbox) } } break; + case MBOX_MSG_CGX_PROMISC_DISABLE: + case MBOX_MSG_CGX_PROMISC_ENABLE: + if (msg->rc) { + if (msg->rc == LMAC_AF_ERR_INVALID_PARAM) { + plt_mbox_dbg("Already in same promisc state"); + msg->rc = 0; + } else { + plt_err("Message (%s) response has err=%d", + mbox_id2name(msg->id), msg->rc); + } + } + break; default: if (msg->rc) @@ -759,14 +770,22 @@ roc_pf_vf_mbox_irq(void *param) * by 1ms until this region is zeroed mbox_wait_for_zero() */ mbox_data = plt_read64(dev->bar2 + RVU_VF_VFPF_MBOX0); - if (mbox_data) - plt_write64(!mbox_data, dev->bar2 + RVU_VF_VFPF_MBOX0); + /* If interrupt occurred for down message */ + if (mbox_data & MBOX_DOWN_MSG) { + mbox_data &= ~MBOX_DOWN_MSG; + plt_write64(mbox_data, dev->bar2 + RVU_VF_VFPF_MBOX0); - /* First process all configuration messages */ - process_msgs(dev, dev->mbox); + /* First process all configuration messages */ + process_msgs(dev, dev->mbox); + } + /* If interrupt occurred for UP message */ + if (mbox_data & MBOX_UP_MSG) { + mbox_data &= ~MBOX_UP_MSG; + plt_write64(mbox_data, dev->bar2 + RVU_VF_VFPF_MBOX0); - /* Process Uplink messages */ - process_msgs_up(dev, &dev->mbox_up); + /* Process Uplink messages */ + process_msgs_up(dev, &dev->mbox_up); + } } /* IRQ to PF from AF - PF context (interrupt thread) */ @@ -788,14 +807,22 @@ roc_af_pf_mbox_irq(void *param) * by 1ms until this region is zeroed mbox_wait_for_zero() */ mbox_data = plt_read64(dev->bar2 + RVU_PF_PFAF_MBOX0); - if (mbox_data) - plt_write64(!mbox_data, dev->bar2 + RVU_PF_PFAF_MBOX0); + /* If interrupt occurred for down message */ + if (mbox_data & MBOX_DOWN_MSG) { + mbox_data &= ~MBOX_DOWN_MSG; + plt_write64(mbox_data, dev->bar2 + RVU_PF_PFAF_MBOX0); - /* First process all configuration messages */ - process_msgs(dev, dev->mbox); + /* First process all configuration messages */ + process_msgs(dev, dev->mbox); + } + /* If interrupt occurred for up message */ + if (mbox_data & MBOX_UP_MSG) { + mbox_data &= ~MBOX_UP_MSG; + plt_write64(mbox_data, dev->bar2 + RVU_PF_PFAF_MBOX0); - /* 
Process Uplink messages */ - process_msgs_up(dev, &dev->mbox_up); + /* Process Uplink messages */ + process_msgs_up(dev, &dev->mbox_up); + } } static int @@ -1070,7 +1097,7 @@ vf_flr_handle_msg(void *param, dev_intr_t *flr) } } -static void * +static uint32_t pf_vf_mbox_thread_main(void *arg) { struct dev *dev = arg; @@ -1114,7 +1141,7 @@ pf_vf_mbox_thread_main(void *arg) pthread_mutex_unlock(&dev->sync.mutex); - return NULL; + return 0; } static void @@ -1155,7 +1182,7 @@ dev_active_vfs(struct dev *dev) int i, count = 0; for (i = 0; i < MAX_VFPF_DWORD_BITS; i++) - count += __builtin_popcount(dev->active_vfs[i]); + count += plt_popcount32(dev->active_vfs[i]); return count; } @@ -1358,6 +1385,11 @@ dev_init(struct dev *dev, struct plt_pci_device *pci_dev) if (!dev_cache_line_size_valid()) return -EFAULT; + if (!roc_plt_lmt_validate()) { + plt_err("Failed to validate LMT line"); + return -EFAULT; + } + bar2 = (uintptr_t)pci_dev->mem_resource[2].addr; bar4 = (uintptr_t)pci_dev->mem_resource[4].addr; if (bar2 == 0 || bar4 == 0) { @@ -1455,13 +1487,13 @@ dev_init(struct dev *dev, struct plt_pci_device *pci_dev) pthread_cond_init(&dev->sync.pfvf_msg_cond, NULL); pthread_mutex_init(&dev->sync.mutex, NULL); - snprintf(name, MBOX_HANDLER_NAME_MAX_LEN, "pf%d_vf_msg_hndlr", dev->pf); + snprintf(name, MBOX_HANDLER_NAME_MAX_LEN, "mbox_pf%d", dev->pf); dev->sync.start_thread = true; - rc = plt_ctrl_thread_create(&dev->sync.pfvf_msg_thread, name, NULL, - pf_vf_mbox_thread_main, dev); + rc = plt_thread_create_control(&dev->sync.pfvf_msg_thread, name, + pf_vf_mbox_thread_main, dev); if (rc != 0) { plt_err("Failed to create thread for VF mbox handling\n"); - goto iounmap; + goto thread_fail; } } @@ -1488,10 +1520,11 @@ dev_init(struct dev *dev, struct plt_pci_device *pci_dev) if (dev->sync.start_thread) { dev->sync.start_thread = false; pthread_cond_signal(&dev->sync.pfvf_msg_cond); - pthread_join(dev->sync.pfvf_msg_thread, NULL); - pthread_mutex_destroy(&dev->sync.mutex); - pthread_cond_destroy(&dev->sync.pfvf_msg_cond); + plt_thread_join(dev->sync.pfvf_msg_thread, NULL); } +thread_fail: + pthread_mutex_destroy(&dev->sync.mutex); + pthread_cond_destroy(&dev->sync.pfvf_msg_cond); iounmap: dev_vf_mbase_put(pci_dev, vf_mbase); mbox_unregister: @@ -1519,7 +1552,7 @@ dev_fini(struct dev *dev, struct plt_pci_device *pci_dev) if (dev->sync.start_thread) { dev->sync.start_thread = false; pthread_cond_signal(&dev->sync.pfvf_msg_cond); - pthread_join(dev->sync.pfvf_msg_thread, NULL); + plt_thread_join(dev->sync.pfvf_msg_thread, NULL); pthread_mutex_destroy(&dev->sync.mutex); pthread_cond_destroy(&dev->sync.pfvf_msg_cond); } diff --git a/drivers/common/cnxk/roc_dev_priv.h b/drivers/common/cnxk/roc_dev_priv.h index 1f84f74ff39..5b2c5096f8f 100644 --- a/drivers/common/cnxk/roc_dev_priv.h +++ b/drivers/common/cnxk/roc_dev_priv.h @@ -73,7 +73,7 @@ dev_is_afvf(uint16_t pf_func) struct mbox_sync { bool start_thread; uint8_t msg_avail; - pthread_t pfvf_msg_thread; + plt_thread_t pfvf_msg_thread; pthread_cond_t pfvf_msg_cond; pthread_mutex_t mutex; }; diff --git a/drivers/common/cnxk/roc_dpi.c b/drivers/common/cnxk/roc_dpi.c index 93c8318a3d2..c2411682945 100644 --- a/drivers/common/cnxk/roc_dpi.c +++ b/drivers/common/cnxk/roc_dpi.c @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(C) 2021 Marvell. 
*/ + #include #include #include @@ -52,17 +53,12 @@ roc_dpi_disable(struct roc_dpi *dpi) } int -roc_dpi_configure(struct roc_dpi *roc_dpi) +roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, uint64_t chunk_base) { struct plt_pci_device *pci_dev; - const struct plt_memzone *dpi_mz; dpi_mbox_msg_t mbox_msg; - struct npa_pool_s pool; - struct npa_aura_s aura; - int rc, count, buflen; - uint64_t aura_handle; - plt_iova_t iova; - char name[32]; + uint64_t reg; + int rc; if (!roc_dpi) { plt_err("roc_dpi is NULL"); @@ -70,79 +66,30 @@ roc_dpi_configure(struct roc_dpi *roc_dpi) } pci_dev = roc_dpi->pci_dev; - memset(&pool, 0, sizeof(struct npa_pool_s)); - pool.nat_align = 1; - - memset(&aura, 0, sizeof(aura)); - rc = roc_npa_pool_create(&aura_handle, DPI_CMD_QUEUE_SIZE, - DPI_CMD_QUEUE_BUFS, &aura, &pool, 0); - if (rc) { - plt_err("Failed to create NPA pool, err %d\n", rc); - return rc; - } - - snprintf(name, sizeof(name), "dpimem%d", roc_dpi->vfid); - buflen = DPI_CMD_QUEUE_SIZE * DPI_CMD_QUEUE_BUFS; - dpi_mz = plt_memzone_reserve_aligned(name, buflen, 0, - DPI_CMD_QUEUE_SIZE); - if (dpi_mz == NULL) { - plt_err("dpi memzone reserve failed"); - rc = -ENOMEM; - goto err1; - } - - roc_dpi->mz = dpi_mz; - iova = dpi_mz->iova; - for (count = 0; count < DPI_CMD_QUEUE_BUFS; count++) { - roc_npa_aura_op_free(aura_handle, 0, iova); - iova += DPI_CMD_QUEUE_SIZE; - } - - roc_dpi->chunk_base = (void *)roc_npa_aura_op_alloc(aura_handle, 0); - if (!roc_dpi->chunk_base) { - plt_err("Failed to alloc buffer from NPA aura"); - rc = -ENOMEM; - goto err2; - } - - roc_dpi->chunk_next = (void *)roc_npa_aura_op_alloc(aura_handle, 0); - if (!roc_dpi->chunk_next) { - plt_err("Failed to alloc buffer from NPA aura"); - rc = -ENOMEM; - goto err2; - } - roc_dpi->aura_handle = aura_handle; - /* subtract 2 as they have already been alloc'ed above */ - roc_dpi->pool_size_m1 = (DPI_CMD_QUEUE_SIZE >> 3) - 2; + roc_dpi_disable(roc_dpi); + reg = plt_read64(roc_dpi->rbase + DPI_VDMA_SADDR); + while (!(reg & BIT_ULL(63))) + reg = plt_read64(roc_dpi->rbase + DPI_VDMA_SADDR); plt_write64(0x0, roc_dpi->rbase + DPI_VDMA_REQQ_CTL); - plt_write64(((uint64_t)(roc_dpi->chunk_base) >> 7) << 7, - roc_dpi->rbase + DPI_VDMA_SADDR); + plt_write64(chunk_base, roc_dpi->rbase + DPI_VDMA_SADDR); mbox_msg.u[0] = 0; mbox_msg.u[1] = 0; /* DPI PF driver expects vfid starts from index 0 */ mbox_msg.s.vfid = roc_dpi->vfid; mbox_msg.s.cmd = DPI_QUEUE_OPEN; - mbox_msg.s.csize = DPI_CMD_QUEUE_SIZE; - mbox_msg.s.aura = roc_npa_aura_handle_to_aura(aura_handle); + mbox_msg.s.csize = chunk_sz; + mbox_msg.s.aura = aura; mbox_msg.s.sso_pf_func = idev_sso_pffunc_get(); mbox_msg.s.npa_pf_func = idev_npa_pffunc_get(); rc = send_msg_to_pf(&pci_dev->addr, (const char *)&mbox_msg, sizeof(dpi_mbox_msg_t)); - if (rc < 0) { + if (rc < 0) plt_err("Failed to send mbox message %d to DPI PF, err %d", mbox_msg.s.cmd, rc); - goto err2; - } - - return rc; -err2: - plt_memzone_free(dpi_mz); -err1: - roc_npa_pool_destroy(aura_handle); return rc; } @@ -153,11 +100,9 @@ roc_dpi_dev_init(struct roc_dpi *roc_dpi) uint16_t vfid; roc_dpi->rbase = pci_dev->mem_resource[0].addr; - vfid = ((pci_dev->addr.devid & 0x1F) << 3) | - (pci_dev->addr.function & 0x7); + vfid = ((pci_dev->addr.devid & 0x1F) << 3) | (pci_dev->addr.function & 0x7); vfid -= 1; roc_dpi->vfid = vfid; - plt_spinlock_init(&roc_dpi->chunk_lock); return 0; } @@ -180,14 +125,9 @@ roc_dpi_dev_fini(struct roc_dpi *roc_dpi) mbox_msg.s.vfid = roc_dpi->vfid; mbox_msg.s.cmd = DPI_QUEUE_CLOSE; - rc = 
send_msg_to_pf(&pci_dev->addr, (const char *)&mbox_msg, - sizeof(dpi_mbox_msg_t)); + rc = send_msg_to_pf(&pci_dev->addr, (const char *)&mbox_msg, sizeof(dpi_mbox_msg_t)); if (rc < 0) - plt_err("Failed to send mbox message %d to DPI PF, err %d", - mbox_msg.s.cmd, rc); - - roc_npa_pool_destroy(roc_dpi->aura_handle); - plt_memzone_free(roc_dpi->mz); + plt_err("Failed to send mbox message %d to DPI PF, err %d", mbox_msg.s.cmd, rc); return rc; } diff --git a/drivers/common/cnxk/roc_dpi.h b/drivers/common/cnxk/roc_dpi.h index 2f061b07c53..4ebde5b8a65 100644 --- a/drivers/common/cnxk/roc_dpi.h +++ b/drivers/common/cnxk/roc_dpi.h @@ -5,41 +5,17 @@ #ifndef _ROC_DPI_H_ #define _ROC_DPI_H_ -struct roc_dpi_args { - uint8_t num_ssegs; - uint8_t num_dsegs; - uint8_t comp_type; - uint8_t direction; - uint8_t sdevice; - uint8_t ddevice; - uint8_t swap; - uint8_t use_lock : 1; - uint8_t tt : 7; - uint16_t func; - uint16_t grp; - uint32_t tag; - uint64_t comp_ptr; -}; - struct roc_dpi { - /* Input parameters */ struct plt_pci_device *pci_dev; - /* End of Input parameters */ - const struct plt_memzone *mz; uint8_t *rbase; uint16_t vfid; - uint16_t pool_size_m1; - uint16_t chunk_head; - uint64_t *chunk_base; - uint64_t *chunk_next; - uint64_t aura_handle; - plt_spinlock_t chunk_lock; } __plt_cache_aligned; int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi); int __roc_api roc_dpi_dev_fini(struct roc_dpi *roc_dpi); -int __roc_api roc_dpi_configure(struct roc_dpi *dpi); +int __roc_api roc_dpi_configure(struct roc_dpi *dpi, uint32_t chunk_sz, uint64_t aura, + uint64_t chunk_base); int __roc_api roc_dpi_enable(struct roc_dpi *dpi); int __roc_api roc_dpi_disable(struct roc_dpi *dpi); diff --git a/drivers/common/cnxk/roc_dpi_priv.h b/drivers/common/cnxk/roc_dpi_priv.h index 1fa1a715d37..518a3e73517 100644 --- a/drivers/common/cnxk/roc_dpi_priv.h +++ b/drivers/common/cnxk/roc_dpi_priv.h @@ -16,9 +16,6 @@ #define DPI_REG_DUMP 0x3 #define DPI_GET_REG_CFG 0x4 -#define DPI_CMD_QUEUE_SIZE 4096 -#define DPI_CMD_QUEUE_BUFS 1024 - typedef union dpi_mbox_msg_t { uint64_t u[2]; struct dpi_mbox_message_s { diff --git a/drivers/common/cnxk/roc_errata.h b/drivers/common/cnxk/roc_errata.h index 22d2406e949..6f84e06603b 100644 --- a/drivers/common/cnxk/roc_errata.h +++ b/drivers/common/cnxk/roc_errata.h @@ -82,6 +82,13 @@ roc_errata_cpt_hang_on_x2p_bp(void) return roc_model_is_cn10ka_a0() || roc_model_is_cn10ka_a1(); } +/* Errata IPBUCPT-38756 */ +static inline bool +roc_errata_cpt_has_ctx_fetch_issue(void) +{ + return roc_model_is_cn10kb(); +} + /* IPBUNIXRX-40400 */ static inline bool roc_errata_nix_no_meta_aura(void) diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h index 815f800e7ab..f4807ee2719 100644 --- a/drivers/common/cnxk/roc_features.h +++ b/drivers/common/cnxk/roc_features.h @@ -46,6 +46,12 @@ roc_feature_nix_has_reass(void) return roc_model_is_cn10ka(); } +static inline bool +roc_feature_nix_has_cqe_stash(void) +{ + return roc_model_is_cn10ka_b0(); +} + static inline bool roc_feature_nix_has_rxchan_multi_bpid(void) { @@ -71,4 +77,10 @@ roc_feature_bphy_has_macsec(void) { return roc_model_is_cnf10kb(); } + +static inline bool +roc_feature_nix_has_inl_ipsec(void) +{ + return !roc_model_is_cnf10kb(); +} #endif diff --git a/drivers/common/cnxk/roc_ie_ot.h b/drivers/common/cnxk/roc_ie_ot.h index b7fcdf9ba7c..af2691e0eb2 100644 --- a/drivers/common/cnxk/roc_ie_ot.h +++ b/drivers/common/cnxk/roc_ie_ot.h @@ -570,6 +570,9 @@ PLT_STATIC_ASSERT(offsetof(struct roc_ot_ipsec_outb_sa, 
hmac_opad_ipad) == PLT_STATIC_ASSERT(offsetof(struct roc_ot_ipsec_outb_sa, ctx) == 31 * sizeof(uint64_t)); +#define ROC_OT_IPSEC_SA_SZ_MAX \ + (PLT_MAX(sizeof(struct roc_ot_ipsec_inb_sa), sizeof(struct roc_ot_ipsec_outb_sa))) + void __roc_api roc_ot_ipsec_inb_sa_init(struct roc_ot_ipsec_inb_sa *sa, bool is_inline); void __roc_api roc_ot_ipsec_outb_sa_init(struct roc_ot_ipsec_outb_sa *sa); diff --git a/drivers/common/cnxk/roc_mbox.c b/drivers/common/cnxk/roc_mbox.c index c91fa63e837..7b734fcd24a 100644 --- a/drivers/common/cnxk/roc_mbox.c +++ b/drivers/common/cnxk/roc_mbox.c @@ -209,10 +209,9 @@ static void mbox_msg_send_data(struct mbox *mbox, int devid, uint8_t data) { struct mbox_dev *mdev = &mbox->dev[devid]; - struct mbox_hdr *tx_hdr = - (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start); - struct mbox_hdr *rx_hdr = - (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start); + struct mbox_hdr *tx_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start); + struct mbox_hdr *rx_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start); + uint64_t intr_val; /* Reset header for next messages */ tx_hdr->msg_size = mdev->msg_size; @@ -229,11 +228,16 @@ mbox_msg_send_data(struct mbox *mbox, int devid, uint8_t data) /* Sync mbox data into memory */ plt_wmb(); + /* Check for any pending interrupt */ + intr_val = plt_read64( + (volatile void *)(mbox->reg_base + (mbox->trigger | (devid << mbox->tr_shift)))); + + intr_val |= (uint64_t)data; /* The interrupt should be fired after num_msgs is written * to the shared memory */ - plt_write64(data, (volatile void *)(mbox->reg_base + - (mbox->trigger | (devid << mbox->tr_shift)))); + plt_write64(intr_val, (volatile void *)(mbox->reg_base + + (mbox->trigger | (devid << mbox->tr_shift)))); } /** diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h index 2f85b2f7550..05434aec5a2 100644 --- a/drivers/common/cnxk/roc_mbox.h +++ b/drivers/common/cnxk/roc_mbox.h @@ -227,6 +227,8 @@ struct mbox_msghdr { npc_mcam_get_stats_req, npc_mcam_get_stats_rsp) \ M(NPC_GET_FIELD_HASH_INFO, 0x6013, npc_get_field_hash_info, \ npc_get_field_hash_info_req, npc_get_field_hash_info_rsp) \ + M(NPC_MCAM_GET_HIT_STATUS, 0x6015, npc_mcam_get_hit_status, \ + npc_mcam_get_hit_status_req, npc_mcam_get_hit_status_rsp) \ /* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \ M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, nix_lf_alloc_req, \ nix_lf_alloc_rsp) \ @@ -540,10 +542,25 @@ struct lmtst_tbl_setup_req { }; /* CGX mbox message formats */ +/* CGX mailbox error codes + * Range 1101 - 1200. 
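The mbox_msg_send_data() change above turns the trigger write into a read-modify-OR: the current register value is read, the new message count is OR-ed in, and the merged value is written back, so a bit the peer had already raised at read time is not wiped out by the notification. A minimal standalone model of that pattern, with the shared register reduced to a plain variable:

```c
#include <stdint.h>
#include <stdio.h>

/* Model of a shared doorbell/trigger register: the peer may already have set
 * bits in it, so we merge our value instead of overwriting the register. */
static volatile uint64_t trigger_reg;

static void
send_data(uint64_t data)
{
	uint64_t intr_val;

	intr_val = trigger_reg; /* check for any pending interrupt */
	intr_val |= data;       /* merge our notification */
	trigger_reg = intr_val; /* single write keeps the pending bit alive */
}

int
main(void)
{
	trigger_reg = 1ULL << 63; /* pretend an interrupt is already pending */
	send_data(0x2);
	printf("trigger = 0x%llx\n", (unsigned long long)trigger_reg); /* 0x8000000000000002 */
	return 0;
}
```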
+ */ +enum cgx_af_status { + LMAC_AF_ERR_INVALID_PARAM = -1101, + LMAC_AF_ERR_PF_NOT_MAPPED = -1102, + LMAC_AF_ERR_PERM_DENIED = -1103, + LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104, + LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105, + LMAC_AF_ERR_CMD_TIMEOUT = -1106, + LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED = -1107, + LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED = -1108, + LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED = -1109, + LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110, +}; struct cgx_stats_rsp { struct mbox_msghdr hdr; -#define CGX_RX_STATS_COUNT 13 +#define CGX_RX_STATS_COUNT 9 #define CGX_TX_STATS_COUNT 18 uint64_t __io rx_stats[CGX_RX_STATS_COUNT]; uint64_t __io tx_stats[CGX_TX_STATS_COUNT]; @@ -569,6 +586,7 @@ struct cgx_fec_stats_rsp { struct cgx_mac_addr_set_or_get { struct mbox_msghdr hdr; uint8_t __io mac_addr[PLT_ETHER_ADDR_LEN]; + uint32_t index; }; /* Structure for requesting the operation to @@ -2002,6 +2020,8 @@ struct cpt_lf_alloc_req_msg { uint16_t __io sso_pf_func; uint16_t __io eng_grpmsk; uint8_t __io blkaddr; + uint8_t __io ctx_ilen_valid : 1; + uint8_t __io ctx_ilen : 7; }; #define CPT_INLINE_INBOUND 0 @@ -2083,6 +2103,8 @@ struct cpt_rx_inline_lf_cfg_msg { uint32_t __io credit_th; uint16_t __io bpid; uint32_t __io reserved; + uint8_t __io ctx_ilen_valid : 1; + uint8_t __io ctx_ilen : 7; }; struct cpt_caps_rsp_msg { @@ -2466,6 +2488,31 @@ struct npc_mcam_get_stats_rsp { uint8_t __io stat_ena; /* enabled */ }; +#define MCAM_ARR_SIZE 256 +#define MCAM_ARR_ELEM_SZ 64 + +struct npc_mcam_get_hit_status_req { + struct mbox_msghdr hdr; + /* If clear == true, then if the hit status bit for mcam id is set, + * then needs to cleared by writing 1 back. + * If clear == false, then leave the hit status bit as is. + */ + bool __io clear; + uint8_t __io reserved[3]; + /* Start range of mcam id */ + uint32_t __io range_valid_mcam_ids_start; + /* End range of mcam id */ + uint32_t __io range_valid_mcam_ids_end; + /* Bitmap of mcam ids for which the hit status needs to checked */ + uint64_t __io mcam_ids[MCAM_ARR_SIZE]; +}; + +struct npc_mcam_get_hit_status_rsp { + struct mbox_msghdr hdr; + /* Bitmap of mcam hit status, prior to clearing */ + uint64_t __io mcam_hit_status[MCAM_ARR_SIZE]; +}; + /* TIM mailbox error codes * Range 801 - 900. 
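The new npc_mcam_get_hit_status mailbox encodes MCAM entry IDs as a bitmap of MCAM_ARR_SIZE (256) 64-bit words, so an entry maps to word id / 64 and bit id % 64, and only the words covering the [start, end] ID range carry meaning. A small self-contained demonstration of that indexing:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MCAM_ARR_SIZE    256
#define MCAM_ARR_ELEM_SZ 64
#define BIT_ULL(x)       (1ULL << (x))

static void
mark_mcam_id(uint64_t bmap[MCAM_ARR_SIZE], uint32_t id)
{
	bmap[id / MCAM_ARR_ELEM_SZ] |= BIT_ULL(id % MCAM_ARR_ELEM_SZ);
}

static bool
mcam_id_hit(const uint64_t hit[MCAM_ARR_SIZE], uint32_t id)
{
	return (hit[id / MCAM_ARR_ELEM_SZ] >> (id % MCAM_ARR_ELEM_SZ)) & 1;
}

int
main(void)
{
	uint64_t ids[MCAM_ARR_SIZE] = {0};
	uint32_t start = 70, end = 200;

	mark_mcam_id(ids, 70);
	mark_mcam_id(ids, 200);

	/* Only the words covering [start, end] need to be copied into the
	 * request, which is exactly what npc_mcam_get_hit_status() does. */
	printf("words %u..%u, id 70 set: %d\n", start / MCAM_ARR_ELEM_SZ,
	       end / MCAM_ARR_ELEM_SZ, mcam_id_hit(ids, 70));
	return 0;
}
```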
*/ diff --git a/drivers/common/cnxk/roc_mcs.c b/drivers/common/cnxk/roc_mcs.c index 1f269ddae56..f823f7f4784 100644 --- a/drivers/common/cnxk/roc_mcs.c +++ b/drivers/common/cnxk/roc_mcs.c @@ -10,6 +10,7 @@ struct mcs_event_cb { enum roc_mcs_event_type event; roc_mcs_dev_cb_fn cb_fn; void *cb_arg; + void *userdata; void *ret_param; uint32_t active; }; @@ -320,12 +321,16 @@ roc_mcs_intr_configure(struct roc_mcs *mcs, struct roc_mcs_intr_cfg *config) { struct mcs_intr_cfg *req; struct msg_rsp *rsp; + int rc; if (config == NULL) return -EINVAL; MCS_SUPPORT_CHECK; + if (mcs->intr_cfg_once) + return 0; + req = mbox_alloc_msg_mcs_intr_cfg(mcs->mbox); if (req == NULL) return -ENOMEM; @@ -333,7 +338,11 @@ roc_mcs_intr_configure(struct roc_mcs *mcs, struct roc_mcs_intr_cfg *config) req->intr_mask = config->intr_mask; req->mcs_id = mcs->idx; - return mbox_process_msg(mcs->mbox, (void *)&rsp); + rc = mbox_process_msg(mcs->mbox, (void *)&rsp); + if (rc == 0) + mcs->intr_cfg_once = true; + + return rc; } int @@ -630,7 +639,7 @@ roc_mcs_event_cb_register(struct roc_mcs *mcs, enum roc_mcs_event_type event, cb->cb_fn = cb_fn; cb->cb_arg = cb_arg; cb->event = event; - mcs->userdata = userdata; + cb->userdata = userdata; TAILQ_INSERT_TAIL(cb_list, cb, next); } @@ -678,7 +687,8 @@ mcs_event_cb_process(struct roc_mcs *mcs, struct roc_mcs_event_desc *desc) cb->active = 1; mcs_cb.ret_param = desc; - rc = mcs_cb.cb_fn(mcs->userdata, mcs_cb.ret_param, mcs_cb.cb_arg); + rc = mcs_cb.cb_fn(mcs_cb.userdata, mcs_cb.ret_param, mcs_cb.cb_arg, + mcs->sa_port_map[desc->metadata.sa_idx]); cb->active = 0; } @@ -788,6 +798,10 @@ mcs_alloc_rsrc_bmap(struct roc_mcs *mcs) } } + mcs->sa_port_map = plt_zmalloc(sizeof(uint8_t) * hw->sa_entries, 0); + if (mcs->sa_port_map == NULL) + goto exit; + return rc; exit: @@ -865,6 +879,8 @@ roc_mcs_dev_fini(struct roc_mcs *mcs) plt_free(priv->port_rsrc); + plt_free(mcs->sa_port_map); + roc_idev_mcs_free(mcs); plt_free(mcs); diff --git a/drivers/common/cnxk/roc_mcs.h b/drivers/common/cnxk/roc_mcs.h index afac6c92e2b..0627865a4db 100644 --- a/drivers/common/cnxk/roc_mcs.h +++ b/drivers/common/cnxk/roc_mcs.h @@ -7,6 +7,9 @@ #define MCS_AES_GCM_256_KEYLEN 32 +#define ROC_MCS_MAX_AR_WINSZ BIT_ULL(31) +#define ROC_MCS_MAX_MTU (BIT_ULL(16) - 1) + struct roc_mcs_alloc_rsrc_req { uint8_t rsrc_type; uint8_t rsrc_cnt; /* Resources count */ @@ -477,15 +480,17 @@ struct roc_mcs_fips_result_rsp { }; /** User application callback to be registered for any notifications from driver. 
*/ -typedef int (*roc_mcs_dev_cb_fn)(void *userdata, struct roc_mcs_event_desc *desc, void *cb_arg); +typedef int (*roc_mcs_dev_cb_fn)(void *userdata, struct roc_mcs_event_desc *desc, void *cb_arg, + uint8_t port_id); struct roc_mcs { TAILQ_ENTRY(roc_mcs) next; struct plt_pci_device *pci_dev; struct mbox *mbox; - void *userdata; uint8_t idx; uint8_t refcount; + bool intr_cfg_once; + uint8_t *sa_port_map; #define ROC_MCS_MEM_SZ (1 * 1024) uint8_t reserved[ROC_MCS_MEM_SZ] __plt_cache_aligned; @@ -556,6 +561,8 @@ __roc_api int roc_mcs_tx_sc_sa_map_write(struct roc_mcs *mcs, struct roc_mcs_tx_sc_sa_map *tx_sc_sa_map); __roc_api int roc_mcs_tx_sc_sa_map_read(struct roc_mcs *mcs, struct roc_mcs_tx_sc_sa_map *tx_sc_sa_map); +/* SA to Port map update */ +__roc_api void roc_mcs_sa_port_map_update(struct roc_mcs *mcs, int sa_id, uint8_t port_id); /* Flow entry read, write and enable */ __roc_api int roc_mcs_flowid_entry_write(struct roc_mcs *mcs, diff --git a/drivers/common/cnxk/roc_mcs_sec_cfg.c b/drivers/common/cnxk/roc_mcs_sec_cfg.c index 7b3a4c91e81..e2fd3e7b8c9 100644 --- a/drivers/common/cnxk/roc_mcs_sec_cfg.c +++ b/drivers/common/cnxk/roc_mcs_sec_cfg.c @@ -526,3 +526,9 @@ roc_mcs_flowid_entry_enable(struct roc_mcs *mcs, struct roc_mcs_flowid_ena_dis_e return mbox_process_msg(mcs->mbox, (void *)&rsp); } + +void +roc_mcs_sa_port_map_update(struct roc_mcs *mcs, int sa_id, uint8_t port_id) +{ + mcs->sa_port_map[sa_id] = port_id; +} diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c index 152ef7269e9..f64933a1d94 100644 --- a/drivers/common/cnxk/roc_nix.c +++ b/drivers/common/cnxk/roc_nix.c @@ -21,6 +21,14 @@ roc_nix_get_base_chan(struct roc_nix *roc_nix) return nix->rx_chan_base; } +uint8_t +roc_nix_get_rx_chan_cnt(struct roc_nix *roc_nix) +{ + struct nix *nix = roc_nix_to_nix_priv(roc_nix); + + return nix->rx_chan_cnt; +} + uint16_t roc_nix_get_vwqe_interval(struct roc_nix *roc_nix) { @@ -475,6 +483,7 @@ roc_nix_dev_init(struct roc_nix *roc_nix) nix->pci_dev = pci_dev; nix->reta_sz = reta_sz; nix->mtu = ROC_NIX_DEFAULT_HW_FRS; + nix->dmac_flt_idx = -1; /* Register error and ras interrupts */ rc = nix_register_irqs(nix); diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h index 66f0597242a..acdd1c4cbcb 100644 --- a/drivers/common/cnxk/roc_nix.h +++ b/drivers/common/cnxk/roc_nix.h @@ -196,10 +196,11 @@ struct roc_nix_fc_cfg { uint32_t rq; uint16_t tc; uint16_t cq_drop; - bool enable; uint64_t pool; uint64_t spb_pool; uint64_t pool_drop_pct; + uint64_t spb_pool_drop_pct; + bool enable; } rq_cfg; struct { @@ -366,6 +367,7 @@ struct roc_nix_cq { /* Input parameters */ uint16_t qid; uint32_t nb_desc; + uint8_t stash_thresh; /* End of Input parameters */ uint16_t drop_thresh; struct roc_nix *roc_nix; @@ -526,6 +528,7 @@ bool __roc_api roc_nix_is_sdp(struct roc_nix *roc_nix); bool __roc_api roc_nix_is_pf(struct roc_nix *roc_nix); bool __roc_api roc_nix_is_vf_or_sdp(struct roc_nix *roc_nix); int __roc_api roc_nix_get_base_chan(struct roc_nix *roc_nix); +uint8_t __roc_api roc_nix_get_rx_chan_cnt(struct roc_nix *roc_nix); int __roc_api roc_nix_get_pf(struct roc_nix *roc_nix); int __roc_api roc_nix_get_vf(struct roc_nix *roc_nix); uint16_t __roc_api roc_nix_get_pf_func(struct roc_nix *roc_nix); @@ -706,8 +709,13 @@ int __roc_api roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, /* * TM ratelimit tree API. 
*/ -int __roc_api roc_nix_tm_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, - uint64_t rate); +int __roc_api roc_nix_tm_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, uint64_t rate); + +/* + * TM PFC tree ratelimit API. + */ +int __roc_api roc_nix_tm_pfc_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, uint64_t rate); + /* * TM hierarchy enable/disable API. */ diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c index d8ca5f9996c..12bfb9816b7 100644 --- a/drivers/common/cnxk/roc_nix_fc.c +++ b/drivers/common/cnxk/roc_nix_fc.c @@ -282,31 +282,31 @@ static int nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg) { struct nix *nix = roc_nix_to_nix_priv(roc_nix); + uint64_t pool_drop_pct, spb_pool_drop_pct; struct roc_nix_fc_cfg tmp; - uint64_t pool_drop_pct; struct roc_nix_rq *rq; - int sso_ena = 0, rc; + int rc; rq = nix->rqs[fc_cfg->rq_cfg.rq]; - /* Check whether RQ is connected to SSO or not */ - sso_ena = roc_nix_rq_is_sso_enable(roc_nix, fc_cfg->rq_cfg.rq); - if (sso_ena < 0) - return -EINVAL; - if (sso_ena) { + if (rq->sso_ena) { pool_drop_pct = fc_cfg->rq_cfg.pool_drop_pct; /* Use default value for zero pct */ if (fc_cfg->rq_cfg.enable && !pool_drop_pct) pool_drop_pct = ROC_NIX_AURA_THRESH; - roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool, - fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp, - fc_cfg->rq_cfg.tc, pool_drop_pct); + roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool, fc_cfg->rq_cfg.enable, + roc_nix->force_rx_aura_bp, fc_cfg->rq_cfg.tc, pool_drop_pct); if (rq->spb_ena) { + spb_pool_drop_pct = fc_cfg->rq_cfg.spb_pool_drop_pct; + /* Use default value for zero pct */ + if (!spb_pool_drop_pct) + spb_pool_drop_pct = ROC_NIX_AURA_THRESH; + roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.spb_pool, fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp, - fc_cfg->rq_cfg.tc, pool_drop_pct); + fc_cfg->rq_cfg.tc, spb_pool_drop_pct); } if (roc_nix->local_meta_aura_ena && roc_nix->meta_aura_handle) @@ -486,12 +486,10 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui uint32_t aura_id = roc_npa_aura_handle_to_aura(pool_id); struct nix *nix = roc_nix_to_nix_priv(roc_nix); struct npa_lf *lf = idev_npa_obj_get(); - struct npa_aq_enq_req *req; - struct npa_aq_enq_rsp *rsp; + struct npa_aura_attr *aura_attr; uint8_t bp_thresh, bp_intf; - struct mbox *mbox; uint16_t bpid; - int rc, i; + int i; if (roc_nix_is_sdp(roc_nix)) return; @@ -499,31 +497,14 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui if (!lf) return; - mbox = lf->mbox; - req = mbox_alloc_msg_npa_aq_enq(mbox_get(mbox)); - if (req == NULL) { - mbox_put(mbox); - return; - } - - req->aura_id = aura_id; - req->ctype = NPA_AQ_CTYPE_AURA; - req->op = NPA_AQ_INSTOP_READ; - - rc = mbox_process_msg(mbox, (void *)&rsp); - mbox_put(mbox); - if (rc) { - plt_nix_dbg("Failed to read context of aura 0x%" PRIx64, pool_id); - return; - } + aura_attr = &lf->aura_attr[aura_id]; bp_intf = 1 << nix->is_nix1; - bp_thresh = NIX_RQ_AURA_THRESH(drop_percent, rsp->aura.limit >> rsp->aura.shift); + bp_thresh = NIX_RQ_AURA_BP_THRESH(drop_percent, aura_attr->limit, aura_attr->shift); - bpid = (rsp->aura.bp_ena & 0x1) ? rsp->aura.nix0_bpid : rsp->aura.nix1_bpid; + bpid = (aura_attr->bp_ena & 0x1) ? aura_attr->nix0_bpid : aura_attr->nix1_bpid; /* BP is already enabled. */ - if (rsp->aura.bp_ena && ena) { - /* Disable BP if BPIDs don't match and couldn't add new BPID. 
*/ + if (aura_attr->bp_ena && ena) { if (bpid != nix->bpid[tc]) { uint16_t bpid_new = NIX_BPID_INVALID; @@ -537,21 +518,22 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui plt_err("Enabling backpressue failed on aura 0x%" PRIx64, pool_id); } else { + aura_attr->ref_count++; plt_info("Ignoring port=%u tc=%u config on shared aura 0x%" PRIx64, roc_nix->port_id, tc, pool_id); } + } else { + aura_attr->ref_count++; } return; } - /* BP was previously enabled but now disabled skip. */ - if (rsp->aura.bp && ena) - return; - if (ena) { if (roc_npa_aura_bp_configure(pool_id, nix->bpid[tc], bp_intf, bp_thresh, true)) plt_err("Enabling backpressue failed on aura 0x%" PRIx64, pool_id); + else + aura_attr->ref_count++; } else { bool found = !!force; @@ -561,6 +543,8 @@ roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, ui found = true; if (!found) return; + else if ((aura_attr->ref_count > 0) && --(aura_attr->ref_count)) + return; if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false)) plt_err("Disabling backpressue failed on aura 0x%" PRIx64, pool_id); diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c index 16f858f5619..750fd08355f 100644 --- a/drivers/common/cnxk/roc_nix_inl.c +++ b/drivers/common/cnxk/roc_nix_inl.c @@ -779,8 +779,10 @@ nix_inl_eng_caps_get(struct nix *nix) hw_res->cn10k.compcode = CPT_COMP_NOT_DONE; - /* Use this lcore's LMT line as no one else is using it */ - ROC_LMT_BASE_ID_GET(lmt_base, lmt_id); + /* Use this reserved LMT line as no one else is using it */ + lmt_id = roc_plt_control_lmt_id_get(); + lmt_base += ((uint64_t)lmt_id << ROC_LMT_LINE_SIZE_LOG2); + memcpy((void *)lmt_base, &inst, sizeof(inst)); lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id; @@ -851,6 +853,11 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix) nix->cpt_nixbpid = bpids[0]; cfg.bpid = nix->cpt_nixbpid; } + + if (roc_errata_cpt_has_ctx_fetch_issue()) { + cfg.ctx_ilen_valid = true; + cfg.ctx_ilen = (ROC_NIX_INL_OT_IPSEC_INB_HW_SZ / 128) - 1; + } } /* Do onetime Inbound Inline config in CPTPF */ @@ -931,7 +938,9 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix) struct dev *dev = &nix->dev; struct msix_offset_rsp *rsp; struct nix_inl_dev *inl_dev; + bool ctx_ilen_valid = false; size_t sa_sz, ring_sz; + uint8_t ctx_ilen = 0; uint16_t sso_pffunc; uint8_t eng_grpmask; uint64_t blkaddr, i; @@ -967,12 +976,17 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix) return rc; } + if (!roc_model_is_cn9k() && roc_errata_cpt_has_ctx_fetch_issue()) { + ctx_ilen = (ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ / 128) - 1; + ctx_ilen_valid = true; + } + /* Alloc CPT LF */ eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE | 1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE | 1ULL << ROC_CPT_DFLT_ENG_GRP_AE); rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr, - !roc_nix->ipsec_out_sso_pffunc); + !roc_nix->ipsec_out_sso_pffunc, ctx_ilen_valid, ctx_ilen); if (rc) { plt_err("Failed to alloc CPT LF resources, rc=%d", rc); goto lf_detach; diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c index d76158e30d0..dc1306c093f 100644 --- a/drivers/common/cnxk/roc_nix_inl_dev.c +++ b/drivers/common/cnxk/roc_nix_inl_dev.c @@ -176,7 +176,9 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso) { struct roc_cpt_lf *lf = &inl_dev->cpt_lf; struct dev *dev = &inl_dev->dev; + bool ctx_ilen_valid = false; uint8_t eng_grpmask; + uint8_t ctx_ilen = 0; int rc; if (!inl_dev->attach_cptlf) @@ -186,7 +188,13 @@ nix_inl_cpt_setup(struct nix_inl_dev 
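With the cached aura attributes introduced above, back-pressure on a shared aura becomes reference counted: every RQ that enables BP bumps ref_count, and the BPID is only torn down when the last user disables it. The following is a deliberately simplified standalone model of just that counting rule; BPID-mismatch handling and the force flag in the real roc_nix_fc_npa_bp_cfg() are left out.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct aura_bp_state {
	uint16_t ref_count;
	bool bp_ena;
};

static void
aura_bp_cfg(struct aura_bp_state *a, bool enable)
{
	if (enable) {
		/* First user programs the BPID, later users only take a ref */
		if (!a->bp_ena)
			a->bp_ena = true;
		a->ref_count++;
		return;
	}

	/* Disable only when the last reference goes away */
	if (a->ref_count > 0 && --a->ref_count)
		return;
	a->bp_ena = false;
}

int
main(void)
{
	struct aura_bp_state a = {0};

	aura_bp_cfg(&a, true);  /* RQ0 enables BP */
	aura_bp_cfg(&a, true);  /* RQ1 shares the same aura */
	aura_bp_cfg(&a, false); /* RQ0 leaves: BP stays on */
	printf("refs=%u bp=%d\n", a.ref_count, a.bp_ena); /* refs=1 bp=1 */
	aura_bp_cfg(&a, false); /* last user: BP disabled */
	printf("refs=%u bp=%d\n", a.ref_count, a.bp_ena); /* refs=0 bp=0 */
	return 0;
}
```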
*inl_dev, bool inl_dev_sso) eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE | 1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE | 1ULL << ROC_CPT_DFLT_ENG_GRP_AE); - rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, inl_dev_sso); + if (roc_errata_cpt_has_ctx_fetch_issue()) { + ctx_ilen = (ROC_NIX_INL_OT_IPSEC_INB_HW_SZ / 128) - 1; + ctx_ilen_valid = true; + } + + rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, inl_dev_sso, ctx_ilen_valid, + ctx_ilen); if (rc) { plt_err("Failed to alloc CPT LF resources, rc=%d", rc); return rc; @@ -285,7 +293,7 @@ nix_inl_sso_setup(struct nix_inl_dev *inl_dev) } /* Setup hwgrp->hws link */ - sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, true); + sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, 0, true); /* Enable HWGRP */ plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL); @@ -315,7 +323,7 @@ nix_inl_sso_release(struct nix_inl_dev *inl_dev) nix_inl_sso_unregister_irqs(inl_dev); /* Unlink hws */ - sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, false); + sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, 0, false); /* Release XAQ aura */ sso_hwgrp_release_xaq(&inl_dev->dev, 1); @@ -670,7 +678,8 @@ roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle) } /* Setup xaq for hwgrps */ - rc = sso_hwgrp_alloc_xaq(&inl_dev->dev, inl_dev->xaq.aura_handle, 1); + rc = sso_hwgrp_alloc_xaq(&inl_dev->dev, + roc_npa_aura_handle_to_aura(inl_dev->xaq.aura_handle), 1); if (rc) { plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc); return rc; @@ -746,7 +755,7 @@ inl_outb_soft_exp_poll(struct nix_inl_dev *inl_dev, uint32_t ring_idx) } } -static void * +static uint32_t nix_inl_outb_poll_thread(void *args) { struct nix_inl_dev *inl_dev = args; @@ -816,9 +825,8 @@ nix_inl_outb_poll_thread_setup(struct nix_inl_dev *inl_dev) soft_exp_consumer_cnt = 0; soft_exp_poll_thread_exit = false; - rc = plt_ctrl_thread_create(&inl_dev->soft_exp_poll_thread, - "OUTB_SOFT_EXP_POLL_THREAD", NULL, - nix_inl_outb_poll_thread, inl_dev); + rc = plt_thread_create_control(&inl_dev->soft_exp_poll_thread, + "outb-poll", nix_inl_outb_poll_thread, inl_dev); if (rc) { plt_bitmap_free(inl_dev->soft_exp_ring_bmap); plt_free(inl_dev->soft_exp_ring_bmap_mem); @@ -1020,7 +1028,7 @@ roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev) if (inl_dev->set_soft_exp_poll) { soft_exp_poll_thread_exit = true; - pthread_join(inl_dev->soft_exp_poll_thread, NULL); + plt_thread_join(inl_dev->soft_exp_poll_thread, NULL); plt_bitmap_free(inl_dev->soft_exp_ring_bmap); plt_free(inl_dev->soft_exp_ring_bmap_mem); plt_free(inl_dev->sa_soft_exp_ring); diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h index b0a8976c6b6..3217f4ebc17 100644 --- a/drivers/common/cnxk/roc_nix_inl_priv.h +++ b/drivers/common/cnxk/roc_nix_inl_priv.h @@ -67,7 +67,7 @@ struct nix_inl_dev { struct roc_cpt_lf cpt_lf; /* OUTB soft expiry poll thread */ - pthread_t soft_exp_poll_thread; + plt_thread_t soft_exp_poll_thread; uint32_t soft_exp_poll_freq; uint64_t *sa_soft_exp_ring; bool set_soft_exp_poll; diff --git a/drivers/common/cnxk/roc_nix_mac.c b/drivers/common/cnxk/roc_nix_mac.c index 754d75ac73c..e2e87be5250 100644 --- a/drivers/common/cnxk/roc_nix_mac.c +++ b/drivers/common/cnxk/roc_nix_mac.c @@ -81,9 +81,9 @@ int roc_nix_mac_addr_set(struct roc_nix *roc_nix, const uint8_t addr[]) { struct nix *nix = roc_nix_to_nix_priv(roc_nix); + struct cgx_mac_addr_set_or_get *req, *rsp; struct dev *dev = &nix->dev; struct mbox *mbox = mbox_get(dev->mbox); - struct 
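Both CPT LF allocation paths above program the IPBUCPT-38756 workaround by passing an explicit context inline length, expressed as the number of 128-byte units minus one. The arithmetic, checked standalone with an assumed SA size (the driver derives the real value from ROC_NIX_INL_OT_IPSEC_INB_HW_SZ / _OUTB_HW_SZ):

```c
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t hw_sa_sz = 1024;                /* assumed example size, not the ROC constant */
	uint8_t ctx_ilen = (hw_sa_sz / 128) - 1; /* -> 7, fits the 7-bit mbox field */

	printf("ctx_ilen = %u\n", ctx_ilen);
	return 0;
}
```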
cgx_mac_addr_set_or_get *req; int rc; if (roc_nix_is_vf_or_sdp(roc_nix)) { @@ -97,9 +97,13 @@ roc_nix_mac_addr_set(struct roc_nix *roc_nix, const uint8_t addr[]) } req = mbox_alloc_msg_cgx_mac_addr_set(mbox); + req->index = nix->dmac_flt_idx; mbox_memcpy(req->mac_addr, addr, PLT_ETHER_ADDR_LEN); - rc = mbox_process(mbox); + rc = mbox_process_msg(mbox, (void *)&rsp); + if (rc) + goto exit; + nix->dmac_flt_idx = rsp->index; exit: mbox_put(mbox); return rc; @@ -201,14 +205,6 @@ roc_nix_mac_promisc_mode_enable(struct roc_nix *roc_nix, int enable) goto exit; } - /* Skip CGX promisc toggling if NPC exact match is enabled as - * CGX filtering is disabled permanently. - */ - if (nix->exact_match_ena) { - rc = 0; - goto exit; - } - if (enable) mbox_alloc_msg_cgx_promisc_enable(mbox); else diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h index ea4211dfeda..a582b9df332 100644 --- a/drivers/common/cnxk/roc_nix_priv.h +++ b/drivers/common/cnxk/roc_nix_priv.h @@ -20,7 +20,7 @@ /* Apply LBP at 75% of actual BP */ #define NIX_CQ_LPB_THRESH_FRAC (75 * 16 / 100) #define NIX_CQ_FULL_ERRATA_SKID (1024ull * 256) -#define NIX_RQ_AURA_THRESH(percent, val) (((val) * (percent)) / 100) +#define NIX_RQ_AURA_BP_THRESH(percent, limit, shift) ((((limit) * (percent)) / 100) >> (shift)) /* IRQ triggered when NIX_LF_CINTX_CNT[QCOUNT] crosses this value */ #define CQ_CQE_THRESH_DEFAULT 0x1ULL @@ -153,6 +153,7 @@ struct nix { uint8_t sdp_links; uint8_t tx_link; uint16_t sqb_size; + uint32_t dmac_flt_idx; /* Without FCS, with L2 overhead */ uint16_t mtu; uint16_t chan_cnt; diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c index 08e8bf7ea2d..f96d5c3a967 100644 --- a/drivers/common/cnxk/roc_nix_queue.c +++ b/drivers/common/cnxk/roc_nix_queue.c @@ -7,6 +7,9 @@ #include "roc_api.h" #include "roc_priv.h" +/* Default SQB slack per SQ */ +#define ROC_NIX_SQB_SLACK_DFLT 24 + static inline uint32_t nix_qsize_to_val(enum nix_q_size qsize) { @@ -905,6 +908,13 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq) } cq_ctx->bp = cq->drop_thresh; + if (roc_feature_nix_has_cqe_stash()) { + if (cq_ctx->caching) { + cq_ctx->stashing = 1; + cq_ctx->stash_thresh = cq->stash_thresh; + } + } + rc = mbox_process(mbox); mbox_put(mbox); if (rc) @@ -1012,7 +1022,10 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq) sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb); sq->nb_sqb_bufs_adj = nb_sqb_bufs; - nb_sqb_bufs += PLT_MAX(thr, roc_nix->sqb_slack); + if (roc_nix->sqb_slack) + nb_sqb_bufs += roc_nix->sqb_slack; + else + nb_sqb_bufs += PLT_MAX((int)thr, (int)ROC_NIX_SQB_SLACK_DFLT); /* Explicitly set nat_align alone as by default pool is with both * nat_align and buf_offset = 1 which we don't want for SQB. 
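The renamed NIX_RQ_AURA_BP_THRESH macro above takes the raw aura limit and shift separately and applies the drop percentage before shifting, instead of shifting first as the old call site did. For small shifted limits that ordering is the difference between a usable threshold and zero, as this standalone comparison shows (the limit/shift/percent values are illustrative only):

```c
#include <stdint.h>
#include <stdio.h>

#define OLD_THRESH(pct, val)        (((val) * (pct)) / 100)
#define NEW_BP_THRESH(pct, lim, sh) ((((lim) * (pct)) / 100) >> (sh))

int
main(void)
{
	uint64_t limit = 100, shift = 6, pct = 70; /* illustrative aura context */

	/* Old ordering: shift first, then scale -> (100 >> 6) * 70 / 100 = 0 */
	printf("old = %llu\n", (unsigned long long)OLD_THRESH(pct, limit >> shift));
	/* New ordering: scale first, then shift -> (100 * 70 / 100) >> 6 = 1 */
	printf("new = %llu\n", (unsigned long long)NEW_BP_THRESH(pct, limit, shift));
	return 0;
}
```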
*/ diff --git a/drivers/common/cnxk/roc_nix_stats.c b/drivers/common/cnxk/roc_nix_stats.c index 1e93191a07e..7a9619b39d6 100644 --- a/drivers/common/cnxk/roc_nix_stats.c +++ b/drivers/common/cnxk/roc_nix_stats.c @@ -400,14 +400,14 @@ roc_nix_xstats_get(struct roc_nix *roc_nix, struct roc_nix_xstat *xstats, if (rc) goto exit; - for (i = 0; i < roc_nix_num_rx_xstats(); i++) { + for (i = 0; i < CNXK_NIX_NUM_RX_XSTATS_CGX; i++) { xstats[count].value = cgx_resp->rx_stats[nix_rx_xstats_cgx[i].offset]; xstats[count].id = count; count++; } - for (i = 0; i < roc_nix_num_tx_xstats(); i++) { + for (i = 0; i < CNXK_NIX_NUM_TX_XSTATS_CGX; i++) { xstats[count].value = cgx_resp->tx_stats[nix_tx_xstats_cgx[i].offset]; xstats[count].id = count; @@ -426,14 +426,14 @@ roc_nix_xstats_get(struct roc_nix *roc_nix, struct roc_nix_xstat *xstats, if (rc) goto exit; - for (i = 0; i < roc_nix_num_rx_xstats(); i++) { + for (i = 0; i < CNXK_NIX_NUM_RX_XSTATS_RPM; i++) { xstats[count].value = rpm_resp->rx_stats[nix_rx_xstats_rpm[i].offset]; xstats[count].id = count; count++; } - for (i = 0; i < roc_nix_num_tx_xstats(); i++) { + for (i = 0; i < CNXK_NIX_NUM_TX_XSTATS_RPM; i++) { xstats[count].value = rpm_resp->tx_stats[nix_tx_xstats_rpm[i].offset]; xstats[count].id = count; @@ -504,26 +504,26 @@ roc_nix_xstats_names_get(struct roc_nix *roc_nix, return count; if (roc_model_is_cn9k()) { - for (i = 0; i < roc_nix_num_rx_xstats(); i++) { + for (i = 0; i < CNXK_NIX_NUM_RX_XSTATS_CGX; i++) { NIX_XSTATS_NAME_PRINT(xstats_names, count, nix_rx_xstats_cgx, i); count++; } - for (i = 0; i < roc_nix_num_tx_xstats(); i++) { + for (i = 0; i < CNXK_NIX_NUM_TX_XSTATS_CGX; i++) { NIX_XSTATS_NAME_PRINT(xstats_names, count, nix_tx_xstats_cgx, i); count++; } } else { - for (i = 0; i < roc_nix_num_rx_xstats(); i++) { + for (i = 0; i < CNXK_NIX_NUM_RX_XSTATS_RPM; i++) { NIX_XSTATS_NAME_PRINT(xstats_names, count, nix_rx_xstats_rpm, i); count++; } - for (i = 0; i < roc_nix_num_tx_xstats(); i++) { + for (i = 0; i < CNXK_NIX_NUM_TX_XSTATS_RPM; i++) { NIX_XSTATS_NAME_PRINT(xstats_names, count, nix_tx_xstats_rpm, i); count++; diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c index c1046113550..ece88b5e994 100644 --- a/drivers/common/cnxk/roc_nix_tm.c +++ b/drivers/common/cnxk/roc_nix_tm.c @@ -11,7 +11,7 @@ bitmap_ctzll(uint64_t slab) if (slab == 0) return 0; - return __builtin_ctzll(slab); + return plt_ctz64(slab); } void @@ -610,8 +610,6 @@ roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq) return 0; exit: - roc_nix_tm_dump(sq->roc_nix, NULL); - roc_nix_queues_ctx_dump(sq->roc_nix, NULL); return -EFAULT; } @@ -748,6 +746,70 @@ roc_nix_tm_sq_free_pending_sqe(struct nix *nix, int q) return 0; } +static inline int +nix_tm_sdp_sq_drop_pkts(struct roc_nix *roc_nix, struct roc_nix_sq *sq) +{ + struct nix *nix = roc_nix_to_nix_priv(roc_nix); + struct mbox *mbox = mbox_get((&nix->dev)->mbox); + struct nix_txschq_config *req = NULL, *rsp; + enum roc_nix_tm_tree tree = nix->tm_tree; + int rc = 0, qid = sq->qid; + struct nix_tm_node *node; + uint64_t regval; + + /* Find the node for this SQ */ + node = nix_tm_node_search(nix, qid, tree); + while (node) { + if (node->hw_lvl != NIX_TXSCH_LVL_TL4) { + node = node->parent; + continue; + } + break; + } + if (!node) { + plt_err("Invalid node/state for sq %u", qid); + return -EFAULT; + } + + /* Get present link config */ + req = mbox_alloc_msg_nix_txschq_cfg(mbox); + req->read = 1; + req->lvl = NIX_TXSCH_LVL_TL4; + req->reg[0] = NIX_AF_TL4X_SDP_LINK_CFG(node->hw_id); + req->num_regs = 1; 
+ rc = mbox_process_msg(mbox, (void **)&rsp); + if (rc || rsp->num_regs != 1) + goto err; + regval = rsp->regval[0]; + /* Disable BP_ENA in SDP link config */ + req = mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = NIX_TXSCH_LVL_TL4; + req->reg[0] = NIX_AF_TL4X_SDP_LINK_CFG(node->hw_id); + req->regval[0] = 0x0ull; + req->regval_mask[0] = ~(BIT_ULL(13)); + req->num_regs = 1; + rc = mbox_process(mbox); + if (rc) + goto err; + mbox_put(mbox); + /* Flush SQ to drop all packets */ + rc = roc_nix_tm_sq_flush_spin(sq); + if (rc) + plt_nix_dbg("SQ flush failed with link reset config rc %d", rc); + mbox = mbox_get((&nix->dev)->mbox); + /* Restore link config */ + req = mbox_alloc_msg_nix_txschq_cfg(mbox); + req->reg[0] = NIX_AF_TL4X_SDP_LINK_CFG(node->hw_id); + req->lvl = NIX_TXSCH_LVL_TL4; + req->regval[0] = regval; + req->regval_mask[0] = ~(BIT_ULL(13) | BIT_ULL(12) | GENMASK_ULL(7, 0)); + req->num_regs = 1; + rc = mbox_process(mbox); +err: + mbox_put(mbox); + return rc; +} + /* Flush and disable tx queue and its parent SMQ */ int nix_tm_sq_flush_pre(struct roc_nix_sq *sq) @@ -834,8 +896,13 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq) /* Wait for sq entries to be flushed */ rc = roc_nix_tm_sq_flush_spin(sq); if (rc) { - rc = roc_nix_tm_sq_free_pending_sqe(nix, sq->qid); + if (nix->sdp_link) + rc = nix_tm_sdp_sq_drop_pkts(roc_nix, sq); + else + rc = roc_nix_tm_sq_free_pending_sqe(nix, sq->qid); if (rc) { + roc_nix_tm_dump(sq->roc_nix, NULL); + roc_nix_queues_ctx_dump(sq->roc_nix, NULL); plt_err("Failed to drain sq %u, rc=%d\n", sq->qid, rc); return rc; } diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c index 4e88ad1beb7..e1cef7a670b 100644 --- a/drivers/common/cnxk/roc_nix_tm_ops.c +++ b/drivers/common/cnxk/roc_nix_tm_ops.c @@ -1032,6 +1032,104 @@ roc_nix_tm_init(struct roc_nix *roc_nix) return rc; } +int +roc_nix_tm_pfc_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, uint64_t rate) +{ + struct nix *nix = roc_nix_to_nix_priv(roc_nix); + struct nix_tm_shaper_profile profile; + struct mbox *mbox = (&nix->dev)->mbox; + struct nix_tm_node *node, *parent; + struct roc_nix_link_info link_info; + + volatile uint64_t *reg, *regval; + struct nix_txschq_config *req; + uint64_t tl2_rate = 0; + uint16_t flags; + uint8_t k = 0; + int rc; + + if ((nix->tm_tree != ROC_NIX_TM_PFC) || !(nix->tm_flags & NIX_TM_HIERARCHY_ENA)) + return NIX_ERR_TM_INVALID_TREE; + + node = nix_tm_node_search(nix, qid, nix->tm_tree); + + /* check if we found a valid leaf node */ + if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent || + node->parent->hw_id == NIX_TM_HW_ID_INVALID) { + return NIX_ERR_TM_INVALID_NODE; + } + + /* Get the link Speed */ + if (roc_nix_mac_link_info_get(roc_nix, &link_info)) + return -EINVAL; + + if (link_info.status) + tl2_rate = link_info.speed * (uint64_t)1E6; + + /* Configure TL3 of leaf node with requested rate */ + parent = node->parent; /* SMQ/MDQ */ + parent = parent->parent; /* TL4 */ + parent = parent->parent; /* TL3 */ + flags = parent->flags; + + req = mbox_alloc_msg_nix_txschq_cfg(mbox_get(mbox)); + req->lvl = parent->hw_lvl; + reg = req->reg; + regval = req->regval; + + if (rate == 0) { + k += nix_tm_sw_xoff_prep(parent, true, ®[k], ®val[k]); + flags &= ~NIX_TM_NODE_ENABLED; + goto exit; + } + + if (!(flags & NIX_TM_NODE_ENABLED)) { + k += nix_tm_sw_xoff_prep(parent, false, ®[k], ®val[k]); + flags |= NIX_TM_NODE_ENABLED; + } + + /* Use only PIR for rate limit */ + memset(&profile, 0, sizeof(profile)); + profile.peak.rate = rate; + /* Minimum 
burst of ~4us Bytes of Tx */ + profile.peak.size = + PLT_MAX((uint64_t)roc_nix_max_pkt_len(roc_nix), (4ul * rate) / ((uint64_t)1E6 * 8)); + if (!nix->tm_rate_min || nix->tm_rate_min > rate) + nix->tm_rate_min = rate; + + k += nix_tm_shaper_reg_prep(parent, &profile, ®[k], ®val[k]); +exit: + req->num_regs = k; + rc = mbox_process(mbox); + mbox_put(mbox); + if (rc) + return rc; + + parent->flags = flags; + + /* If link is up then configure TL2 with link speed */ + if (tl2_rate && (flags & NIX_TM_NODE_ENABLED)) { + k = 0; + parent = parent->parent; + req = mbox_alloc_msg_nix_txschq_cfg(mbox_get(mbox)); + req->lvl = parent->hw_lvl; + reg = req->reg; + regval = req->regval; + + /* Use only PIR for rate limit */ + memset(&profile, 0, sizeof(profile)); + profile.peak.rate = tl2_rate; + /* Minimum burst of ~4us Bytes of Tx */ + profile.peak.size = PLT_MAX((uint64_t)roc_nix_max_pkt_len(roc_nix), + (4ul * tl2_rate) / ((uint64_t)1E6 * 8)); + k += nix_tm_shaper_reg_prep(parent, &profile, ®[k], ®val[k]); + req->num_regs = k; + rc = mbox_process(mbox); + mbox_put(mbox); + } + return rc; +} + int roc_nix_tm_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, uint64_t rate) { diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c index 3840d6d4573..8e3da95a45b 100644 --- a/drivers/common/cnxk/roc_nix_tm_utils.c +++ b/drivers/common/cnxk/roc_nix_tm_utils.c @@ -588,7 +588,7 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node, reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq); regval[k] = BIT_ULL(12); regval[k] |= BIT_ULL(13); - regval[k] |= relchan; + regval[k] |= (uint64_t)relchan; k++; } break; @@ -606,7 +606,7 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node, if (!nix->sdp_link && nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) { reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link); - regval[k] = BIT_ULL(12) | relchan; + regval[k] = BIT_ULL(12) | (uint64_t)relchan; k++; } @@ -625,7 +625,7 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node, if (!nix->sdp_link && nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) { reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link); - regval[k] = BIT_ULL(12) | relchan; + regval[k] = BIT_ULL(12) | (uint64_t)relchan; k++; } @@ -927,7 +927,7 @@ nix_tm_resource_avail(struct nix *nix, uint8_t hw_lvl, bool contig) /* Count bit set */ start_pos = pos; do { - count += __builtin_popcountll(slab); + count += plt_popcount64(slab); if (!plt_bitmap_scan(bmp, &pos, &slab)) break; } while (pos != start_pos); diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c index 3b9a70028ba..b76b8e2342e 100644 --- a/drivers/common/cnxk/roc_npa.c +++ b/drivers/common/cnxk/roc_npa.c @@ -185,6 +185,8 @@ npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle) aura_req->op = NPA_AQ_INSTOP_WRITE; aura_req->aura.ena = 0; aura_req->aura_mask.ena = ~aura_req->aura_mask.ena; + aura_req->aura.bp_ena = 0; + aura_req->aura_mask.bp_ena = ~aura_req->aura_mask.bp_ena; rc = mbox_process(mbox); if (rc < 0) @@ -398,7 +400,7 @@ bitmap_ctzll(uint64_t slab) if (slab == 0) return 0; - return __builtin_ctzll(slab); + return plt_ctz64(slab); } static int @@ -535,6 +537,8 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size, if (rc) goto stack_mem_free; + lf->aura_attr[aura_id].shift = aura->shift; + lf->aura_attr[aura_id].limit = aura->limit; *aura_handle = roc_npa_aura_handle_gen(aura_id, lf->base); /* Update aura count */ roc_npa_aura_op_cnt_set(*aura_handle, 0, block_count); @@ -657,6 +661,8 @@ 
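roc_nix_tm_pfc_rlimit_sq() above shapes with PIR only and sizes the burst as roughly 4 microseconds of traffic at the requested rate, never less than one maximum-sized packet. The same arithmetic as a standalone check; the 9000-byte maximum frame length is an assumed example, the driver uses roc_nix_max_pkt_len():

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PLT_MAX(a, b) ((a) > (b) ? (a) : (b))

int
main(void)
{
	uint64_t rate = 25000ULL * 1000000ULL; /* 25 Gbps, in bits per second */
	uint64_t max_pkt_len = 9000;           /* assumed maximum frame size */

	/* ~4 microseconds of traffic at 'rate', expressed in bytes, floored at
	 * one maximum-sized packet. */
	uint64_t burst = PLT_MAX(max_pkt_len, (4ULL * rate) / (1000000ULL * 8));

	printf("burst = %" PRIu64 " bytes\n", burst); /* 12500 */
	return 0;
}
```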
npa_aura_alloc(struct npa_lf *lf, const uint32_t block_count, int pool_id, if (rc) return rc; + lf->aura_attr[aura_id].shift = aura->shift; + lf->aura_attr[aura_id].limit = aura->limit; *aura_handle = roc_npa_aura_handle_gen(aura_id, lf->base); return 0; @@ -735,6 +741,9 @@ roc_npa_aura_limit_modify(uint64_t aura_handle, uint16_t aura_limit) aura_req->aura.limit = aura_limit; aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit); rc = mbox_process(mbox); + if (rc) + goto exit; + lf->aura_attr[aura_req->aura_id].limit = aura_req->aura.limit; exit: mbox_put(mbox); return rc; @@ -931,7 +940,14 @@ roc_npa_aura_bp_configure(uint64_t aura_handle, uint16_t bpid, uint8_t bp_intf, req->aura.bp_ena = bp_intf; req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena); - mbox_process(mbox); + rc = mbox_process(mbox); + if (rc) + goto fail; + + lf->aura_attr[aura_id].nix0_bpid = req->aura.nix0_bpid; + lf->aura_attr[aura_id].nix1_bpid = req->aura.nix1_bpid; + lf->aura_attr[aura_id].bp_ena = req->aura.bp_ena; + lf->aura_attr[aura_id].bp = req->aura.bp; fail: mbox_put(mbox); return rc; diff --git a/drivers/common/cnxk/roc_npa_priv.h b/drivers/common/cnxk/roc_npa_priv.h index d2118cc4fb8..060df9ab049 100644 --- a/drivers/common/cnxk/roc_npa_priv.h +++ b/drivers/common/cnxk/roc_npa_priv.h @@ -49,6 +49,13 @@ struct npa_aura_lim { struct npa_aura_attr { int buf_type[ROC_NPA_BUF_TYPE_END]; + uint16_t ref_count; + uint64_t nix0_bpid; + uint64_t nix1_bpid; + uint64_t shift; + uint64_t limit; + uint8_t bp_ena; + uint8_t bp; }; struct dev; diff --git a/drivers/common/cnxk/roc_npc.c b/drivers/common/cnxk/roc_npc.c index 848086c8de5..f36f5e42c84 100644 --- a/drivers/common/cnxk/roc_npc.c +++ b/drivers/common/cnxk/roc_npc.c @@ -302,6 +302,7 @@ roc_npc_init(struct roc_npc *roc_npc) npc_mem = mem; TAILQ_INIT(&npc->ipsec_list); + TAILQ_INIT(&npc->age_flow_list); for (idx = 0; idx < npc->flow_max_priority; idx++) { TAILQ_INIT(&npc->flow_list[idx]); TAILQ_INIT(&npc->prio_flow_list[idx]); @@ -330,6 +331,9 @@ roc_npc_init(struct roc_npc *roc_npc) */ plt_bitmap_set(npc->rss_grp_entries, 0); + rc = npc_aged_flows_bitmap_alloc(roc_npc); + if (rc != 0) + goto done; return rc; done: @@ -610,6 +614,17 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, flow->mtr_id = act_mtr->mtr_id; req_act |= ROC_NPC_ACTION_TYPE_METER; break; + case ROC_NPC_ACTION_TYPE_AGE: + if (flow->is_validate == true) + break; + plt_seqcount_init(&roc_npc->flow_age.seq_cnt); + errcode = npc_aging_ctrl_thread_create(roc_npc, + actions->conf, + flow); + if (errcode != 0) + goto err_exit; + req_act |= ROC_NPC_ACTION_TYPE_AGE; + break; default: errcode = NPC_ERR_ACTION_NOTSUP; goto err_exit; @@ -726,11 +741,15 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, if (req_act == ROC_NPC_ACTION_TYPE_VLAN_STRIP) { /* Only VLAN action is provided */ flow->npc_action = NIX_RX_ACTIONOP_UCAST; - } else if (req_act & - (ROC_NPC_ACTION_TYPE_PF | ROC_NPC_ACTION_TYPE_VF)) { - flow->npc_action = NIX_RX_ACTIONOP_UCAST; - if (req_act & ROC_NPC_ACTION_TYPE_QUEUE) - flow->npc_action |= (uint64_t)rq << 20; + } else if (req_act & (ROC_NPC_ACTION_TYPE_PF | ROC_NPC_ACTION_TYPE_VF)) { + /* Check if any other action is set */ + if ((req_act == ROC_NPC_ACTION_TYPE_PF) || (req_act == ROC_NPC_ACTION_TYPE_VF)) { + flow->npc_action = NIX_RX_ACTIONOP_DEFAULT; + } else { + flow->npc_action = NIX_RX_ACTIONOP_UCAST; + if (req_act & ROC_NPC_ACTION_TYPE_QUEUE) + flow->npc_action |= (uint64_t)rq << 20; + } } else if (req_act & 
ROC_NPC_ACTION_TYPE_DROP) { flow->npc_action = NIX_RX_ACTIONOP_DROP; } else if (req_act & ROC_NPC_ACTION_TYPE_QUEUE) { @@ -741,8 +760,7 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, } else if (req_act & ROC_NPC_ACTION_TYPE_SEC) { flow->npc_action = NIX_RX_ACTIONOP_UCAST_IPSEC; flow->npc_action |= (uint64_t)rq << 20; - } else if (req_act & - (ROC_NPC_ACTION_TYPE_FLAG | ROC_NPC_ACTION_TYPE_MARK)) { + } else if (req_act & (ROC_NPC_ACTION_TYPE_FLAG | ROC_NPC_ACTION_TYPE_MARK)) { flow->npc_action = NIX_RX_ACTIONOP_UCAST; } else if (req_act & ROC_NPC_ACTION_TYPE_COUNT) { /* Keep ROC_NPC_ACTION_TYPE_COUNT_ACT always at the end @@ -925,9 +943,35 @@ npc_rss_action_configure(struct roc_npc *roc_npc, uint8_t key[ROC_NIX_RSS_KEY_LEN]; const uint8_t *key_ptr; uint8_t flowkey_algx; + uint32_t key_len; uint16_t *reta; int rc; + roc_nix_rss_key_get(roc_nix, key); + if (rss->key == NULL) { + key_ptr = key; + } else { + key_len = rss->key_len; + if (key_len > ROC_NIX_RSS_KEY_LEN) + key_len = ROC_NIX_RSS_KEY_LEN; + + for (i = 0; i < key_len; i++) { + if (key[i] != rss->key[i]) { + plt_err("RSS key config not supported"); + plt_err("New Key:"); + for (i = 0; i < key_len; i++) + plt_dump_no_nl("0x%.2x ", rss->key[i]); + plt_dump_no_nl("\n"); + plt_err("Configured Key:"); + for (i = 0; i < ROC_NIX_RSS_KEY_LEN; i++) + plt_dump_no_nl("0x%.2x ", key[i]); + plt_dump_no_nl("\n"); + return -ENOTSUP; + } + } + key_ptr = rss->key; + } + rc = npc_rss_free_grp_get(npc, &rss_grp_idx); /* RSS group :0 is not usable for flow rss action */ if (rc < 0 || rss_grp_idx == 0) @@ -942,13 +986,6 @@ npc_rss_action_configure(struct roc_npc *roc_npc, *rss_grp = rss_grp_idx; - if (rss->key == NULL) { - roc_nix_rss_key_default_fill(roc_nix, key); - key_ptr = key; - } else { - key_ptr = rss->key; - } - roc_nix_rss_key_set(roc_nix, key_ptr); /* If queue count passed in the rss action is less than @@ -1398,7 +1435,7 @@ roc_npc_sdp_channel_get(struct roc_npc *roc_npc, uint16_t *chan_base, uint16_t * num_chan = nix->rx_chan_cnt - 1; if (num_chan) { range = *chan_base ^ (*chan_base + num_chan); - num_bits = (sizeof(uint32_t) * 8) - __builtin_clz(range) - 1; + num_bits = (sizeof(uint32_t) * 8) - plt_clz32(range) - 1; /* Set mask for (15 - numbits) MSB bits */ *chan_mask = (uint16_t)~GENMASK(num_bits, 0); } else { @@ -1485,6 +1522,9 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, } } TAILQ_INSERT_TAIL(list, flow, next); + + npc_age_flow_list_entry_add(roc_npc, flow); + return flow; set_rss_failed: @@ -1582,6 +1622,11 @@ roc_npc_flow_destroy(struct roc_npc *roc_npc, struct roc_npc_flow *flow) npc_delete_prio_list_entry(npc, flow); + npc_age_flow_list_entry_delete(roc_npc, flow); + if (roc_npc->flow_age.age_flow_refcnt == 0 && + plt_thread_is_valid(roc_npc->flow_age.aged_flows_poll_thread)) + npc_aging_ctrl_thread_destroy(roc_npc); + done: plt_free(flow); return 0; diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h index 48e550a6957..cf7e6c9548c 100644 --- a/drivers/common/cnxk/roc_npc.h +++ b/drivers/common/cnxk/roc_npc.h @@ -40,6 +40,7 @@ enum roc_npc_item_type { ROC_NPC_ITEM_TYPE_RAW, ROC_NPC_ITEM_TYPE_MARK, ROC_NPC_ITEM_TYPE_TX_QUEUE, + ROC_NPC_ITEM_TYPE_IPV6_ROUTING_EXT, ROC_NPC_ITEM_TYPE_END, }; @@ -128,6 +129,22 @@ struct roc_ipv6_fragment_ext { uint32_t id; /**< Packet ID */ } __plt_packed; +struct roc_ipv6_routing_ext { + uint8_t next_hdr; /**< Protocol, next header. */ + uint8_t hdr_len; /**< Header length. 
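The RSS handling above now compares a user-supplied key byte-for-byte against the key already programmed on the NIX (truncated to the device key length) and rejects the flow instead of silently replacing the device key. A minimal standalone sketch of that check, with a stand-in key length:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RSS_KEY_LEN 48 /* stand-in for ROC_NIX_RSS_KEY_LEN */

/* Returns 0 when the requested key matches the configured one (truncated to
 * the device key length), -1 otherwise -- mirroring the -ENOTSUP path above. */
static int
rss_key_check(const uint8_t *configured, const uint8_t *requested, uint32_t req_len)
{
	if (requested == NULL)
		return 0; /* no key given: keep the configured one */
	if (req_len > RSS_KEY_LEN)
		req_len = RSS_KEY_LEN;
	return memcmp(configured, requested, req_len) ? -1 : 0;
}

int
main(void)
{
	uint8_t dev_key[RSS_KEY_LEN] = {0xfe, 0xed};
	uint8_t same[2] = {0xfe, 0xed}, other[2] = {0xde, 0xad};

	printf("same: %d, other: %d\n",
	       rss_key_check(dev_key, same, 2), rss_key_check(dev_key, other, 2));
	return 0;
}
```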
*/ + uint8_t type; /**< Extension header type. */ + uint8_t segments_left; /**< Valid segments number. */ + union { + uint32_t flags; /**< Packet control data per type. */ + struct { + uint8_t last_entry; /**< The last_entry field of SRH */ + uint8_t flag; /**< Packet flag. */ + uint16_t tag; /**< Packet tag. */ + }; + }; + /* Next are 128-bit IPv6 address fields to describe segments. */ +} __plt_packed; + struct roc_flow_item_ipv6_ext { uint8_t next_hdr; /**< Next header. */ }; @@ -177,6 +194,7 @@ enum roc_npc_action_type { ROC_NPC_ACTION_TYPE_VLAN_PCP_INSERT = (1 << 15), ROC_NPC_ACTION_TYPE_PORT_ID = (1 << 16), ROC_NPC_ACTION_TYPE_METER = (1 << 17), + ROC_NPC_ACTION_TYPE_AGE = (1 << 18), }; struct roc_npc_action { @@ -200,6 +218,13 @@ struct roc_npc_action_port_id { uint32_t id; /**< port ID. */ }; +struct roc_npc_action_age { + uint32_t timeout : 24; /**< Time in seconds. */ + uint32_t reserved : 8; /**< Reserved, must be zero. */ + /** The user flow context, NULL means the flow pointer. */ + void *context; +}; + /** * ESP Header */ @@ -292,6 +317,9 @@ struct roc_npc_flow { uint16_t match_id; uint8_t is_inline_dev; bool use_pre_alloc; + uint64_t timeout_cycles; + void *age_context; + uint32_t timeout; TAILQ_ENTRY(roc_npc_flow) next; }; @@ -324,6 +352,19 @@ enum flow_vtag_cfg_dir { VTAG_TX, VTAG_RX }; #define ROC_ETHER_TYPE_VLAN 0x8100 /**< IEEE 802.1Q VLAN tagging. */ #define ROC_ETHER_TYPE_QINQ 0x88A8 /**< IEEE 802.1ad QinQ tagging. */ +struct roc_npc_flow_age { + plt_seqcount_t seq_cnt; + uint32_t aging_poll_freq; + uint32_t age_flow_refcnt; + uint32_t aged_flows_cnt; + uint32_t start_id; + uint32_t end_id; + plt_thread_t aged_flows_poll_thread; + struct plt_bitmap *aged_flows; + void *age_mem; + bool aged_flows_get_thread_exit; +}; + struct roc_npc { struct roc_nix *roc_nix; uint8_t switch_header_type; @@ -346,11 +387,14 @@ struct roc_npc { bool is_sdp_mask_set; uint16_t sdp_channel; uint16_t sdp_channel_mask; + struct roc_npc_flow_age flow_age; #define ROC_NPC_MEM_SZ (6 * 1024) uint8_t reserved[ROC_NPC_MEM_SZ]; } __plt_cache_aligned; +#define ROC_NPC_AGE_POLL_FREQ_MIN 10 + int __roc_api roc_npc_init(struct roc_npc *roc_npc); int __roc_api roc_npc_fini(struct roc_npc *roc_npc); const char *__roc_api roc_npc_profile_name_get(struct roc_npc *roc_npc); @@ -394,4 +438,5 @@ int __roc_api roc_npc_validate_portid_action(struct roc_npc *roc_npc_src, struct roc_npc *roc_npc_dst); int __roc_api roc_npc_mcam_init(struct roc_npc *roc_npc, struct roc_npc_flow *flow, int mcam_id); int __roc_api roc_npc_mcam_move(struct roc_npc *roc_npc, uint16_t old_ent, uint16_t new_ent); +void *__roc_api roc_npc_aged_flow_ctx_get(struct roc_npc *roc_npc, uint32_t mcam_id); #endif /* _ROC_NPC_H_ */ diff --git a/drivers/common/cnxk/roc_npc_aging.c b/drivers/common/cnxk/roc_npc_aging.c new file mode 100644 index 00000000000..74543f227bb --- /dev/null +++ b/drivers/common/cnxk/roc_npc_aging.c @@ -0,0 +1,317 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2023 Marvell. 
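The new roc_ipv6_routing_ext item mirrors the fixed part of the IPv6 routing / segment-routing header; the 128-bit segment addresses follow it and are covered by the item's size. A quick standalone size check of that fixed 8-byte layout (the struct below is a local copy for illustration only):

```c
#include <stdint.h>
#include <stdio.h>

/* Local copy of the fixed part of the routing extension header, used here
 * only for a layout check; the driver's definition lives in roc_npc.h. */
struct ipv6_routing_ext {
	uint8_t next_hdr;
	uint8_t hdr_len;
	uint8_t type;
	uint8_t segments_left;
	union {
		uint32_t flags;
		struct {
			uint8_t last_entry;
			uint8_t flag;
			uint16_t tag;
		};
	};
};

int
main(void)
{
	/* Fixed part is 8 bytes; 16-byte segment addresses follow it. */
	printf("sizeof = %zu\n", sizeof(struct ipv6_routing_ext));
	return 0;
}
```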
+ */ + +#include "roc_api.h" +#include "roc_priv.h" + +int +npc_aged_flows_bitmap_alloc(struct roc_npc *roc_npc) +{ + struct roc_npc_flow_age *flow_age; + uint8_t *age_mem = NULL; + uint32_t bmap_sz; + int rc = 0; + + bmap_sz = plt_bitmap_get_memory_footprint(MCAM_ARR_ELEM_SZ * + MCAM_ARR_SIZE); + age_mem = plt_zmalloc(bmap_sz, 0); + if (age_mem == NULL) { + plt_err("Bmap alloc failed"); + rc = NPC_ERR_NO_MEM; + goto done; + } + + flow_age = &roc_npc->flow_age; + flow_age->age_mem = age_mem; + flow_age->aged_flows = plt_bitmap_init(MCAM_ARR_ELEM_SZ * MCAM_ARR_SIZE, + age_mem, bmap_sz); + if (!flow_age->aged_flows) { + plt_err("Bitmap init failed"); + plt_free(age_mem); + rc = NPC_ERR_NO_MEM; + goto done; + } + + flow_age->age_flow_refcnt = 0; +done: + return rc; +} + +void +npc_aged_flows_bitmap_free(struct roc_npc *roc_npc) +{ + struct roc_npc_flow_age *flow_age; + + flow_age = &roc_npc->flow_age; + plt_bitmap_free(flow_age->aged_flows); + if (flow_age->age_mem) + plt_free(roc_npc->flow_age.age_mem); +} + +static void +check_timeout_cycles(struct roc_npc *roc_npc, uint32_t mcam_id) +{ + struct npc *npc = roc_npc_to_npc_priv(roc_npc); + struct npc_age_flow_list_head *list; + struct npc_age_flow_entry *fl_iter; + struct roc_npc_flow_age *flow_age; + + flow_age = &roc_npc->flow_age; + list = &npc->age_flow_list; + TAILQ_FOREACH(fl_iter, list, next) { + if (fl_iter->flow->mcam_id == mcam_id && + fl_iter->flow->timeout_cycles < plt_tsc_cycles()) { + /* update bitmap */ + plt_bitmap_set(flow_age->aged_flows, mcam_id); + if (flow_age->aged_flows_cnt == 0) { + flow_age->start_id = mcam_id; + flow_age->end_id = mcam_id; + } + if (flow_age->start_id > mcam_id) + flow_age->start_id = mcam_id; + else if (flow_age->end_id < mcam_id) + flow_age->end_id = mcam_id; + flow_age->aged_flows_cnt += 1; + break; + } + } +} + +static void +update_timeout_cycles(struct roc_npc *roc_npc, uint32_t mcam_id) +{ + struct npc *npc = roc_npc_to_npc_priv(roc_npc); + struct npc_age_flow_list_head *list; + struct npc_age_flow_entry *fl_iter; + + list = &npc->age_flow_list; + TAILQ_FOREACH(fl_iter, list, next) { + if (fl_iter->flow->mcam_id == mcam_id) { + fl_iter->flow->timeout_cycles = plt_tsc_cycles() + + fl_iter->flow->timeout * plt_tsc_hz(); + break; + } + } +} + +static int +npc_mcam_get_hit_status(struct npc *npc, uint64_t *mcam_ids, uint16_t start_id, + uint16_t end_id, uint64_t *hit_status, bool clear) +{ + struct npc_mcam_get_hit_status_req *req; + struct npc_mcam_get_hit_status_rsp *rsp; + struct mbox *mbox = mbox_get(npc->mbox); + uint8_t idx_start; + uint8_t idx_end; + int rc; + int i; + + req = mbox_alloc_msg_npc_mcam_get_hit_status(mbox); + if (req == NULL) + return -ENOSPC; + + idx_start = start_id / MCAM_ARR_ELEM_SZ; + idx_end = end_id / MCAM_ARR_ELEM_SZ; + + for (i = idx_start; i <= idx_end; i++) + req->mcam_ids[i] = mcam_ids[i]; + + req->range_valid_mcam_ids_start = start_id; + req->range_valid_mcam_ids_end = end_id; + req->clear = clear; + + rc = mbox_process_msg(mbox, (void *)&rsp); + if (rc) + goto exit; + + for (i = idx_start; i <= idx_end; i++) + hit_status[i] = rsp->mcam_hit_status[i]; + + rc = 0; +exit: + mbox_put(mbox); + return rc; +} + +uint32_t +npc_aged_flows_get(void *args) +{ + uint64_t hit_status[MCAM_ARR_SIZE] = {0}; + uint64_t mcam_ids[MCAM_ARR_SIZE] = {0}; + struct npc_age_flow_list_head *list; + struct npc_age_flow_entry *fl_iter; + struct roc_npc *roc_npc = args; + struct npc *npc = roc_npc_to_npc_priv(roc_npc); + struct roc_npc_flow_age *flow_age; + bool aging_enabled; + uint32_t 
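Flow aging above keeps an absolute expiry per flow in TSC cycles: a hardware hit pushes the deadline out to now + timeout * hz, and a miss past the deadline marks the flow aged. A minimal standalone model of that bookkeeping, with the cycle counter and frequency replaced by plain variables (the driver uses plt_tsc_cycles()/plt_tsc_hz()):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_now;        /* stands in for plt_tsc_cycles() */
static const uint64_t hz = 1000; /* stands in for plt_tsc_hz() */

struct aged_flow {
	uint32_t timeout;        /* seconds, from the AGE action */
	uint64_t timeout_cycles; /* absolute expiry */
};

static void
flow_hit(struct aged_flow *f)
{
	/* Traffic seen: push the deadline out again */
	f->timeout_cycles = fake_now + (uint64_t)f->timeout * hz;
}

static bool
flow_expired(const struct aged_flow *f)
{
	return f->timeout_cycles < fake_now;
}

int
main(void)
{
	struct aged_flow f = {.timeout = 10};

	flow_hit(&f); /* t=0: deadline at 10 s */
	fake_now = 5 * hz;
	printf("t=5s  expired=%d\n", flow_expired(&f));
	fake_now = 11 * hz;
	printf("t=11s expired=%d\n", flow_expired(&f));
	return 0;
}
```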
start_id; + uint32_t end_id; + uint32_t mcam_id; + uint32_t idx; + uint32_t i; + int rc; + + flow_age = &roc_npc->flow_age; + list = &npc->age_flow_list; + while (!flow_age->aged_flows_get_thread_exit) { + start_id = 0; + end_id = 0; + aging_enabled = false; + memset(mcam_ids, 0, sizeof(mcam_ids)); + TAILQ_FOREACH(fl_iter, list, next) { + mcam_id = fl_iter->flow->mcam_id; + idx = mcam_id / MCAM_ARR_ELEM_SZ; + mcam_ids[idx] |= BIT_ULL(mcam_id % MCAM_ARR_ELEM_SZ); + + if (!aging_enabled) { + start_id = mcam_id; + end_id = mcam_id; + aging_enabled = true; + } + + if (mcam_id < start_id) + start_id = mcam_id; + else if (mcam_id > end_id) + end_id = mcam_id; + } + + if (!aging_enabled) + goto lbl_sleep; + + rc = npc_mcam_get_hit_status(npc, mcam_ids, start_id, end_id, + hit_status, true); + if (rc) + return 0; + + plt_seqcount_write_begin(&flow_age->seq_cnt); + flow_age->aged_flows_cnt = 0; + for (i = start_id; i <= end_id; i++) { + idx = i / MCAM_ARR_ELEM_SZ; + if (mcam_ids[idx] & BIT_ULL(i % MCAM_ARR_ELEM_SZ)) { + if (!(hit_status[idx] & BIT_ULL(i % MCAM_ARR_ELEM_SZ))) + check_timeout_cycles(roc_npc, i); + else + update_timeout_cycles(roc_npc, i); + } + } + plt_seqcount_write_end(&flow_age->seq_cnt); + +lbl_sleep: + sleep(flow_age->aging_poll_freq); + } + + return 0; +} + +void +npc_age_flow_list_entry_add(struct roc_npc *roc_npc, struct roc_npc_flow *flow) +{ + struct npc *npc = roc_npc_to_npc_priv(roc_npc); + struct npc_age_flow_entry *age_fl_iter; + struct npc_age_flow_entry *new_entry; + + new_entry = plt_zmalloc(sizeof(*new_entry), 0); + if (new_entry == NULL) { + plt_err("flow entry alloc failed"); + return; + } + + new_entry->flow = flow; + roc_npc->flow_age.age_flow_refcnt++; + /* List in ascending order of mcam entries */ + TAILQ_FOREACH(age_fl_iter, &npc->age_flow_list, next) { + if (age_fl_iter->flow->mcam_id > flow->mcam_id) { + TAILQ_INSERT_BEFORE(age_fl_iter, new_entry, next); + return; + } + } + TAILQ_INSERT_TAIL(&npc->age_flow_list, new_entry, next); +} + +void +npc_age_flow_list_entry_delete(struct roc_npc *roc_npc, + struct roc_npc_flow *flow) +{ + struct npc *npc = roc_npc_to_npc_priv(roc_npc); + struct npc_age_flow_list_head *list; + struct npc_age_flow_entry *curr; + + list = &npc->age_flow_list; + curr = TAILQ_FIRST(list); + + if (!curr) + return; + + while (curr) { + if (flow->mcam_id == curr->flow->mcam_id) { + TAILQ_REMOVE(list, curr, next); + plt_free(curr); + break; + } + curr = TAILQ_NEXT(curr, next); + } + roc_npc->flow_age.age_flow_refcnt--; +} + +int +npc_aging_ctrl_thread_create(struct roc_npc *roc_npc, + const struct roc_npc_action_age *age, + struct roc_npc_flow *flow) +{ + struct roc_npc_flow_age *flow_age; + int errcode = 0; + + flow_age = &roc_npc->flow_age; + if (age->timeout < flow_age->aging_poll_freq) { + plt_err("Age timeout should be greater or equal to %u seconds", + flow_age->aging_poll_freq); + errcode = NPC_ERR_ACTION_NOTSUP; + goto done; + } + + flow->age_context = age->context == NULL ? 
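npc_age_flow_list_entry_add() above keeps the aging list ordered by MCAM ID so the poll loop can derive a tight start/end window for the hit-status query. The same insert-before-first-larger pattern, shown as a runnable <sys/queue.h> example:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct entry {
	uint32_t mcam_id;
	TAILQ_ENTRY(entry) next;
};
TAILQ_HEAD(entry_list, entry);

static void
list_insert_sorted(struct entry_list *list, uint32_t mcam_id)
{
	struct entry *it, *new_entry = calloc(1, sizeof(*new_entry));

	if (new_entry == NULL)
		return;
	new_entry->mcam_id = mcam_id;

	/* Insert before the first entry with a larger MCAM ID */
	TAILQ_FOREACH(it, list, next) {
		if (it->mcam_id > mcam_id) {
			TAILQ_INSERT_BEFORE(it, new_entry, next);
			return;
		}
	}
	TAILQ_INSERT_TAIL(list, new_entry, next);
}

int
main(void)
{
	struct entry_list list = TAILQ_HEAD_INITIALIZER(list);
	struct entry *it;

	list_insert_sorted(&list, 40);
	list_insert_sorted(&list, 7);
	list_insert_sorted(&list, 19);

	TAILQ_FOREACH(it, &list, next)
		printf("%u ", it->mcam_id); /* 7 19 40 */
	printf("\n");
	return 0;
}
```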
flow : age->context; + flow->timeout = age->timeout; + flow->timeout_cycles = plt_tsc_cycles() + age->timeout * plt_tsc_hz(); + + if (flow_age->age_flow_refcnt == 0) { + flow_age->aged_flows_get_thread_exit = false; + if (plt_thread_create_control(&flow_age->aged_flows_poll_thread, + "Aged Flows Get Ctrl Thread", + npc_aged_flows_get, roc_npc) != 0) { + plt_err("Failed to create thread for age flows"); + errcode = NPC_ERR_ACTION_NOTSUP; + goto done; + } + } +done: + return errcode; +} + +void +npc_aging_ctrl_thread_destroy(struct roc_npc *roc_npc) +{ + struct roc_npc_flow_age *flow_age; + + flow_age = &roc_npc->flow_age; + flow_age->aged_flows_get_thread_exit = true; + plt_thread_join(flow_age->aged_flows_poll_thread, NULL); + npc_aged_flows_bitmap_free(roc_npc); +} + +void * +roc_npc_aged_flow_ctx_get(struct roc_npc *roc_npc, uint32_t mcam_id) +{ + struct npc *npc = roc_npc_to_npc_priv(roc_npc); + struct npc_age_flow_list_head *list; + struct npc_age_flow_entry *fl_iter; + + list = &npc->age_flow_list; + + TAILQ_FOREACH(fl_iter, list, next) { + if (fl_iter->flow->mcam_id == mcam_id) + return fl_iter->flow->age_context; + } + + return NULL; +} diff --git a/drivers/common/cnxk/roc_npc_mcam.c b/drivers/common/cnxk/roc_npc_mcam.c index 62e0ce21b2a..41edec7d8d0 100644 --- a/drivers/common/cnxk/roc_npc_mcam.c +++ b/drivers/common/cnxk/roc_npc_mcam.c @@ -745,7 +745,7 @@ npc_mcam_alloc_and_write(struct npc *npc, struct roc_npc_flow *flow, struct npc_ * For all other rules, set LA LTYPE to match both 1st pass and 2nd pass ltypes. */ if (pst->is_second_pass_rule || (!pst->is_second_pass_rule && pst->has_eth_type)) { - la_offset = __builtin_popcount(npc->keyx_supp_nmask[flow->nix_intf] & + la_offset = plt_popcount32(npc->keyx_supp_nmask[flow->nix_intf] & ((1ULL << 9 /* LA offset */) - 1)); la_offset *= 4; @@ -790,7 +790,7 @@ npc_set_vlan_ltype(struct npc_parse_state *pst) uint8_t lb_offset; lb_offset = - __builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] & + plt_popcount32(pst->npc->keyx_supp_nmask[pst->nix_intf] & ((1ULL << NPC_LTYPE_LB_OFFSET) - 1)); lb_offset *= 4; @@ -812,7 +812,7 @@ npc_set_ipv6ext_ltype_mask(struct npc_parse_state *pst) uint64_t val, mask; lc_offset = - __builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] & + plt_popcount32(pst->npc->keyx_supp_nmask[pst->nix_intf] & ((1ULL << NPC_LTYPE_LC_OFFSET) - 1)); lc_offset *= 4; @@ -835,7 +835,7 @@ npc_set_ipv6ext_ltype_mask(struct npc_parse_state *pst) * zero in LFLAG. 
*/ if (pst->npc->keyx_supp_nmask[pst->nix_intf] & (1ULL << NPC_LFLAG_LC_OFFSET)) { - lcflag_offset = __builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] & + lcflag_offset = plt_popcount32(pst->npc->keyx_supp_nmask[pst->nix_intf] & ((1ULL << NPC_LFLAG_LC_OFFSET) - 1)); lcflag_offset *= 4; diff --git a/drivers/common/cnxk/roc_npc_mcam_dump.c b/drivers/common/cnxk/roc_npc_mcam_dump.c index a88e166fbee..ebd2dd69c24 100644 --- a/drivers/common/cnxk/roc_npc_mcam_dump.c +++ b/drivers/common/cnxk/roc_npc_mcam_dump.c @@ -496,6 +496,10 @@ npc_flow_dump_rx_action(FILE *file, uint64_t npc_action) plt_strlcpy(index_name, "Multicast/mirror table index", NPC_MAX_FIELD_NAME_SIZE); break; + case NIX_RX_ACTIONOP_DEFAULT: + fprintf(file, "NIX_RX_ACTIONOP_DEFAULT (%" PRIu64 ")\n", + (uint64_t)NIX_RX_ACTIONOP_DEFAULT); + break; default: plt_err("Unknown NIX_RX_ACTIONOP found"); return; @@ -645,6 +649,7 @@ npc_flow_hw_mcam_entry_dump(FILE *file, struct npc *npc, struct roc_npc_flow *fl struct nix_inl_dev *inl_dev = NULL; struct idev_cfg *idev; struct mbox *mbox; + uint8_t enabled; int rc = 0, i; idev = idev_get_cfg(); @@ -673,6 +678,7 @@ npc_flow_hw_mcam_entry_dump(FILE *file, struct npc *npc, struct roc_npc_flow *fl mbox_memcpy(mcam_data, mcam_read_rsp->entry_data.kw, sizeof(mcam_data)); mbox_memcpy(mcam_mask, mcam_read_rsp->entry_data.kw_mask, sizeof(mcam_data)); + enabled = mcam_read_rsp->enable; fprintf(file, "HW MCAM Data :\n"); @@ -680,6 +686,7 @@ npc_flow_hw_mcam_entry_dump(FILE *file, struct npc *npc, struct roc_npc_flow *fl fprintf(file, "\tDW%d :%016lX\n", i, mcam_data[i]); fprintf(file, "\tDW%d_Mask:%016lX\n", i, mcam_mask[i]); } + fprintf(file, "\tEnabled = 0x%x\n", enabled); fprintf(file, "\n"); mbox_put(mbox); diff --git a/drivers/common/cnxk/roc_npc_parse.c b/drivers/common/cnxk/roc_npc_parse.c index acaebaed21a..ecd1b3e13bd 100644 --- a/drivers/common/cnxk/roc_npc_parse.c +++ b/drivers/common/cnxk/roc_npc_parse.c @@ -719,8 +719,8 @@ npc_handle_ipv6ext_attr(const struct roc_npc_flow_item_ipv6 *ipv6_spec, static int npc_process_ipv6_item(struct npc_parse_state *pst) { - uint8_t ipv6_hdr_mask[sizeof(struct roc_ipv6_hdr) + sizeof(struct roc_ipv6_fragment_ext)]; - uint8_t ipv6_hdr_buf[sizeof(struct roc_ipv6_hdr) + sizeof(struct roc_ipv6_fragment_ext)]; + uint8_t ipv6_hdr_mask[2 * sizeof(struct roc_ipv6_hdr)]; + uint8_t ipv6_hdr_buf[2 * sizeof(struct roc_ipv6_hdr)]; const struct roc_npc_flow_item_ipv6 *ipv6_spec, *ipv6_mask; const struct roc_npc_item_info *pattern = pst->pattern; int offset = 0, rc = 0, lid, item_count = 0; @@ -804,6 +804,16 @@ npc_process_ipv6_item(struct npc_parse_state *pst) sizeof(struct roc_ipv6_fragment_ext)); break; + } else if (pattern->type == ROC_NPC_ITEM_TYPE_IPV6_ROUTING_EXT) { + item_count++; + ltype = NPC_LT_LC_IP6_EXT; + parse_info.len = sizeof(struct roc_ipv6_hdr) + pattern->size; + + if (pattern->spec) + memcpy(ipv6_hdr_buf + offset, pattern->spec, pattern->size); + if (pattern->mask) + memcpy(ipv6_hdr_mask + offset, pattern->mask, pattern->size); + break; } pattern++; @@ -867,6 +877,7 @@ npc_parse_lc(struct npc_parse_state *pst) case ROC_NPC_ITEM_TYPE_IPV6: case ROC_NPC_ITEM_TYPE_IPV6_EXT: case ROC_NPC_ITEM_TYPE_IPV6_FRAG_EXT: + case ROC_NPC_ITEM_TYPE_IPV6_ROUTING_EXT: return npc_process_ipv6_item(pst); case ROC_NPC_ITEM_TYPE_ARP_ETH_IPV4: lt = NPC_LT_LC_ARP; diff --git a/drivers/common/cnxk/roc_npc_priv.h b/drivers/common/cnxk/roc_npc_priv.h index 593dca353bb..424f8e207ad 100644 --- a/drivers/common/cnxk/roc_npc_priv.h +++ 
b/drivers/common/cnxk/roc_npc_priv.h @@ -378,6 +378,13 @@ struct npc_prio_flow_entry { TAILQ_HEAD(npc_prio_flow_list_head, npc_prio_flow_entry); +struct npc_age_flow_entry { + struct roc_npc_flow *flow; + TAILQ_ENTRY(npc_age_flow_entry) next; +}; + +TAILQ_HEAD(npc_age_flow_list_head, npc_age_flow_entry); + struct npc { struct mbox *mbox; /* Mbox */ uint32_t keyx_supp_nmask[NPC_MAX_INTF]; /* nibble mask */ @@ -403,6 +410,7 @@ struct npc { npc_ld_flags_t prx_lfcfg; /* KEX LD_Flags CFG */ struct npc_flow_list *flow_list; struct npc_prio_flow_list_head *prio_flow_list; + struct npc_age_flow_list_head age_flow_list; struct plt_bitmap *rss_grp_entries; struct npc_flow_list ipsec_list; uint8_t exact_match_ena; @@ -480,4 +488,13 @@ int npc_rss_action_program(struct roc_npc *roc_npc, const struct roc_npc_action int npc_rss_group_free(struct npc *npc, struct roc_npc_flow *flow); int npc_mcam_init(struct npc *npc, struct roc_npc_flow *flow, int mcam_id); int npc_mcam_move(struct mbox *mbox, uint16_t old_ent, uint16_t new_ent); +void npc_age_flow_list_entry_add(struct roc_npc *npc, struct roc_npc_flow *flow); +void npc_age_flow_list_entry_delete(struct roc_npc *npc, struct roc_npc_flow *flow); +uint32_t npc_aged_flows_get(void *args); +int npc_aged_flows_bitmap_alloc(struct roc_npc *roc_npc); +void npc_aged_flows_bitmap_free(struct roc_npc *roc_npc); +int npc_aging_ctrl_thread_create(struct roc_npc *roc_npc, + const struct roc_npc_action_age *age, + struct roc_npc_flow *flow); +void npc_aging_ctrl_thread_destroy(struct roc_npc *roc_npc); #endif /* _ROC_NPC_PRIV_H_ */ diff --git a/drivers/common/cnxk/roc_platform.c b/drivers/common/cnxk/roc_platform.c index f91b95ceaba..15cbb6d68ff 100644 --- a/drivers/common/cnxk/roc_platform.c +++ b/drivers/common/cnxk/roc_platform.c @@ -21,6 +21,31 @@ roc_plt_init_cb_register(roc_plt_init_cb_t cb) return 0; } +uint16_t +roc_plt_control_lmt_id_get(void) +{ + uint32_t lcore_id = plt_lcore_id(); + if (lcore_id != LCORE_ID_ANY) + return lcore_id << ROC_LMT_LINES_PER_CORE_LOG2; + else + /* Return last LMT ID to be used in control path functionality */ + return ROC_NUM_LMT_LINES - 1; +} + +uint16_t +roc_plt_lmt_validate(void) +{ + if (!roc_model_is_cn9k()) { + /* Last LMT line is reserved for control specific operation and can be + * used from any EAL or non-EAL cores.
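 * As a rough illustration (the values below are placeholders, not fixed by
 * this patch): with RTE_MAX_LCORE = 128 and ROC_LMT_LINES_PER_CORE_LOG2 = 5,
 * worker lcores claim 128 << 5 = 4096 LMT lines, so ROC_NUM_LMT_LINES must
 * exceed 4096 for the last line to remain free for control-path use via
 * roc_plt_control_lmt_id_get().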
+ */ + if ((RTE_MAX_LCORE << ROC_LMT_LINES_PER_CORE_LOG2) > + (ROC_NUM_LMT_LINES - 1)) + return 0; + } + return 1; +} + int roc_plt_init(void) { @@ -60,14 +85,15 @@ roc_plt_init(void) return 0; } -RTE_LOG_REGISTER(cnxk_logtype_base, pmd.cnxk.base, NOTICE); -RTE_LOG_REGISTER(cnxk_logtype_mbox, pmd.cnxk.mbox, NOTICE); -RTE_LOG_REGISTER(cnxk_logtype_cpt, pmd.crypto.cnxk, NOTICE); -RTE_LOG_REGISTER(cnxk_logtype_ml, pmd.ml.cnxk, NOTICE); -RTE_LOG_REGISTER(cnxk_logtype_npa, pmd.mempool.cnxk, NOTICE); -RTE_LOG_REGISTER(cnxk_logtype_nix, pmd.net.cnxk, NOTICE); -RTE_LOG_REGISTER(cnxk_logtype_npc, pmd.net.cnxk.flow, NOTICE); -RTE_LOG_REGISTER(cnxk_logtype_sso, pmd.event.cnxk, NOTICE); -RTE_LOG_REGISTER(cnxk_logtype_tim, pmd.event.cnxk.timer, NOTICE); -RTE_LOG_REGISTER(cnxk_logtype_tm, pmd.net.cnxk.tm, NOTICE); +RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_base, NOTICE); +RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_mbox, NOTICE); +RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_cpt, NOTICE); +RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_ml, NOTICE); +RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_npa, NOTICE); +RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_nix, NOTICE); +RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_npc, NOTICE); +RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_sso, NOTICE); +RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_tim, NOTICE); +RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_tm, NOTICE); +RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_dpi, NOTICE); RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_ree, NOTICE); diff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h index 08f83aba129..ba23b2e0d79 100644 --- a/drivers/common/cnxk/roc_platform.h +++ b/drivers/common/cnxk/roc_platform.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -129,12 +130,27 @@ #define plt_spinlock_unlock rte_spinlock_unlock #define plt_spinlock_trylock rte_spinlock_trylock +#define plt_seqcount_t rte_seqcount_t +#define plt_seqcount_init rte_seqcount_init +#define plt_seqcount_read_begin rte_seqcount_read_begin +#define plt_seqcount_read_retry rte_seqcount_read_retry +#define plt_seqcount_write_begin rte_seqcount_write_begin +#define plt_seqcount_write_end rte_seqcount_write_end + +#define plt_thread_t rte_thread_t #define plt_intr_callback_register rte_intr_callback_register #define plt_intr_callback_unregister rte_intr_callback_unregister #define plt_intr_disable rte_intr_disable #define plt_thread_is_intr rte_thread_is_intr #define plt_intr_callback_fn rte_intr_callback_fn -#define plt_ctrl_thread_create rte_ctrl_thread_create +#define plt_thread_create_control rte_thread_create_internal_control +#define plt_thread_join rte_thread_join + +static inline bool +plt_thread_is_valid(plt_thread_t thr) +{ + return thr.opaque_id ? true : false; +} #define plt_intr_efd_counter_size_get rte_intr_efd_counter_size_get #define plt_intr_efd_counter_size_set rte_intr_efd_counter_size_set @@ -194,6 +210,11 @@ #define plt_bit_relaxed_set64 rte_bit_relaxed_set64 #define plt_bit_relaxed_clear64 rte_bit_relaxed_clear64 +#define plt_popcount32 rte_popcount32 +#define plt_popcount64 rte_popcount64 +#define plt_clz32 rte_clz32 +#define plt_ctz64 rte_ctz64 + #define plt_mmap mmap #define PLT_PROT_READ PROT_READ #define PLT_PROT_WRITE PROT_WRITE @@ -242,6 +263,7 @@ extern int cnxk_logtype_sso; extern int cnxk_logtype_tim; extern int cnxk_logtype_tm; extern int cnxk_logtype_ree; +extern int cnxk_logtype_dpi; #define plt_err(fmt, args...) 
\ RTE_LOG(ERR, PMD, "%s():%u " fmt "\n", __func__, __LINE__, ##args) @@ -270,6 +292,7 @@ extern int cnxk_logtype_ree; #define plt_tim_dbg(fmt, ...) plt_dbg(tim, fmt, ##__VA_ARGS__) #define plt_tm_dbg(fmt, ...) plt_dbg(tm, fmt, ##__VA_ARGS__) #define plt_ree_dbg(fmt, ...) plt_dbg(ree, fmt, ##__VA_ARGS__) +#define plt_dpi_dbg(fmt, ...) plt_dbg(dpi, fmt, ##__VA_ARGS__) /* Datapath logs */ #define plt_dp_err(fmt, args...) \ @@ -305,6 +328,11 @@ extern int cnxk_logtype_ree; __rte_internal int roc_plt_init(void); +__rte_internal +uint16_t roc_plt_control_lmt_id_get(void); +__rte_internal +uint16_t roc_plt_lmt_validate(void); + /* Init callbacks */ typedef int (*roc_plt_init_cb_t)(void); int __roc_api roc_plt_init_cb_register(roc_plt_init_cb_t cb); diff --git a/drivers/common/cnxk/roc_ree.c b/drivers/common/cnxk/roc_ree.c index 1eb2ae72724..b6392658c31 100644 --- a/drivers/common/cnxk/roc_ree.c +++ b/drivers/common/cnxk/roc_ree.c @@ -441,7 +441,7 @@ static void roc_ree_lf_err_intr_unregister(struct roc_ree_vf *vf, uint16_t msix_off, uintptr_t base) { - struct rte_pci_device *pci_dev = vf->pci_dev; + struct plt_pci_device *pci_dev = vf->pci_dev; /* Disable error interrupts */ plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1C); @@ -468,7 +468,7 @@ static int roc_ree_lf_err_intr_register(struct roc_ree_vf *vf, uint16_t msix_off, uintptr_t base) { - struct rte_pci_device *pci_dev = vf->pci_dev; + struct plt_pci_device *pci_dev = vf->pci_dev; int ret; /* Disable error interrupts */ diff --git a/drivers/common/cnxk/roc_ree.h b/drivers/common/cnxk/roc_ree.h index e138e4de66b..bf994e71245 100644 --- a/drivers/common/cnxk/roc_ree.h +++ b/drivers/common/cnxk/roc_ree.h @@ -68,7 +68,7 @@ struct roc_ree_qp { /**< Base address where BAR is mapped */ struct roc_ree_pending_queue pend_q; /**< Pending queue */ - rte_iova_t iq_dma_addr; + plt_iova_t iq_dma_addr; /**< Instruction queue address */ uint32_t roc_regexdev_jobid; /**< Job ID */ diff --git a/drivers/common/cnxk/roc_se.c b/drivers/common/cnxk/roc_se.c index 26622973157..6ced4ef7896 100644 --- a/drivers/common/cnxk/roc_se.c +++ b/drivers/common/cnxk/roc_se.c @@ -58,9 +58,6 @@ cpt_ciph_type_set(roc_se_cipher_type type, struct roc_se_ctx *ctx, uint16_t key_ int fc_type = 0; switch (type) { - case ROC_SE_PASSTHROUGH: - fc_type = ROC_SE_FC_GEN; - break; case ROC_SE_DES3_CBC: case ROC_SE_DES3_ECB: case ROC_SE_DES_DOCSISBPI: @@ -526,6 +523,10 @@ roc_se_ciph_key_set(struct roc_se_ctx *se_ctx, roc_se_cipher_type type, const ui uint8_t *ci_key; int i, ret; + /* For NULL cipher, no processing required. */ + if (type == ROC_SE_PASSTHROUGH) + return 0; + zs_ch_ctx = &se_ctx->se_ctx.zs_ch_ctx; if (roc_model_is_cn9k()) { @@ -555,10 +556,6 @@ roc_se_ciph_key_set(struct roc_se_ctx *se_ctx, roc_se_cipher_type type, const ui } switch (type) { - case ROC_SE_PASSTHROUGH: - se_ctx->enc_cipher = 0; - fctx->enc.enc_cipher = 0; - goto success; case ROC_SE_DES3_CBC: /* CPT performs DES using 3DES with the 8B DES-key * replicated 2 more times to match the 24B 3DES-key. 
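The roc_se.c comment just above notes that the CPT engine runs single DES as 3DES with the 8-byte key replicated; a minimal sketch of that widening, with hypothetical buffer and function names (string.h assumed):

static void
des_key_to_3des_key(uint8_t des3_key[24], const uint8_t des_key[8])
{
	/* K1 = K2 = K3: the 8B DES key is copied twice more so that the
	 * resulting 24B key makes the 3DES engine behave as plain DES.
	 */
	memcpy(des3_key, des_key, 8);
	memcpy(des3_key + 8, des_key, 8);
	memcpy(des3_key + 16, des_key, 8);
}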
diff --git a/drivers/common/cnxk/roc_se.h b/drivers/common/cnxk/roc_se.h index 008ab31912a..d8cbd58c9a6 100644 --- a/drivers/common/cnxk/roc_se.h +++ b/drivers/common/cnxk/roc_se.h @@ -183,13 +183,15 @@ typedef enum { struct roc_se_enc_context { uint64_t iv_source : 1; uint64_t aes_key : 2; - uint64_t rsvd_60 : 1; + uint64_t rsvd_59 : 1; uint64_t enc_cipher : 4; uint64_t auth_input_type : 1; - uint64_t rsvd_52_54 : 3; + uint64_t auth_key_src : 1; + uint64_t rsvd_50_51 : 2; uint64_t hash_type : 4; uint64_t mac_len : 8; - uint64_t rsvd_39_0 : 40; + uint64_t rsvd_16_39 : 24; + uint64_t hmac_key_sz : 16; uint8_t encr_key[32]; uint8_t encr_iv[16]; }; @@ -321,6 +323,8 @@ struct roc_se_ctx { uint64_t ciph_then_auth : 1; uint64_t auth_then_ciph : 1; uint64_t eia2 : 1; + /* auth_iv_offset passed to PDCP_CHAIN opcode based on FVC bit */ + uint8_t pdcp_iv_offset; union cpt_inst_w4 template_w4; /* Below fields are accessed by hardware */ struct se_ctx_s { @@ -358,12 +362,13 @@ struct roc_se_fc_params { struct roc_se_iov_ptr *dst_iov; }; }; - void *iv_buf; - void *auth_iv_buf; + const void *iv_buf; + const void *auth_iv_buf; struct roc_se_ctx *ctx; struct roc_se_buf_ptr meta_buf; uint8_t cipher_iv_len; uint8_t auth_iv_len; + uint8_t pdcp_iv_offset; struct roc_se_buf_ptr aad_buf; struct roc_se_buf_ptr mac_buf; diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c index a5f48d5bbcc..748d287bada 100644 --- a/drivers/common/cnxk/roc_sso.c +++ b/drivers/common/cnxk/roc_sso.c @@ -5,7 +5,8 @@ #include "roc_api.h" #include "roc_priv.h" -#define SSO_XAQ_CACHE_CNT (0x7) +#define SSO_XAQ_CACHE_CNT (0x3) +#define SSO_XAQ_RSVD_CNT (0x4) #define SSO_XAQ_SLACK (16) /* Private functions. */ @@ -185,8 +186,8 @@ sso_rsrc_get(struct roc_sso *roc_sso) } void -sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp, - uint16_t hwgrp[], uint16_t n, uint16_t enable) +sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp, uint16_t hwgrp[], + uint16_t n, uint8_t set, uint16_t enable) { uint64_t reg; int i, j, k; @@ -203,7 +204,7 @@ sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp, k = n % 4; k = k ? k : 4; for (j = 0; j < k; j++) { - mask[j] = hwgrp[i + j] | enable << 14; + mask[j] = hwgrp[i + j] | (uint32_t)set << 12 | enable << 14; if (bmp) { enable ? 
plt_bitmap_set(bmp, hwgrp[i + j]) : plt_bitmap_clear(bmp, hwgrp[i + j]); @@ -289,8 +290,8 @@ roc_sso_ns_to_gw(struct roc_sso *roc_sso, uint64_t ns) } int -roc_sso_hws_link(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[], - uint16_t nb_hwgrp) +roc_sso_hws_link(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[], uint16_t nb_hwgrp, + uint8_t set) { struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev; struct sso *sso; @@ -298,14 +299,14 @@ roc_sso_hws_link(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[], sso = roc_sso_to_sso_priv(roc_sso); base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | hws << 12); - sso_hws_link_modify(hws, base, sso->link_map[hws], hwgrp, nb_hwgrp, 1); + sso_hws_link_modify(hws, base, sso->link_map[hws], hwgrp, nb_hwgrp, set, 1); return nb_hwgrp; } int -roc_sso_hws_unlink(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[], - uint16_t nb_hwgrp) +roc_sso_hws_unlink(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[], uint16_t nb_hwgrp, + uint8_t set) { struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev; struct sso *sso; @@ -313,7 +314,7 @@ roc_sso_hws_unlink(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[], sso = roc_sso_to_sso_priv(roc_sso); base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | hws << 12); - sso_hws_link_modify(hws, base, sso->link_map[hws], hwgrp, nb_hwgrp, 0); + sso_hws_link_modify(hws, base, sso->link_map[hws], hwgrp, nb_hwgrp, set, 0); return nb_hwgrp; } @@ -357,6 +358,37 @@ roc_sso_hws_stats_get(struct roc_sso *roc_sso, uint8_t hws, return rc; } +void +roc_sso_hws_gwc_invalidate(struct roc_sso *roc_sso, uint8_t *hws, + uint8_t nb_hws) +{ + struct sso *sso = roc_sso_to_sso_priv(roc_sso); + struct ssow_lf_inv_req *req; + struct dev *dev = &sso->dev; + struct mbox *mbox; + int i; + + if (!nb_hws) + return; + + mbox = mbox_get(dev->mbox); + req = mbox_alloc_msg_sso_ws_cache_inv(mbox); + if (req == NULL) { + mbox_process(mbox); + req = mbox_alloc_msg_sso_ws_cache_inv(mbox); + if (req == NULL) { + mbox_put(mbox); + return; + } + } + req->hdr.ver = SSOW_INVAL_SELECTIVE_VER; + req->nb_hws = nb_hws; + for (i = 0; i < nb_hws; i++) + req->hws[i] = hws[i]; + mbox_process(mbox); + mbox_put(mbox); +} + int roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint8_t hwgrp, struct roc_sso_hwgrp_stats *stats) @@ -499,6 +531,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq, * pipelining. */ xaq->nb_xaq = (SSO_XAQ_CACHE_CNT * nb_hwgrp); + xaq->nb_xaq += (SSO_XAQ_RSVD_CNT * nb_hwgrp); xaq->nb_xaq += PLT_MAX(1 + ((xaq->nb_xae - 1) / xae_waes), xaq->nb_xaq); xaq->nb_xaq += SSO_XAQ_SLACK; @@ -542,8 +575,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq, * There should be a minimum headroom of 7 XAQs per HWGRP for SSO * to request XAQ to cache them even before enqueue is called. 
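 * As a worked example (nb_hwgrp, nb_xae and xae_waes below are illustrative
 * values only): with nb_hwgrp = 2, nb_xae = 1024 and xae_waes = 16, the
 * cache and reserved counts give nb_xaq = 3*2 + 4*2 = 14, then
 * PLT_MAX(1 + 1023/16, 14) = 64 is added plus 16 slack for a total of 94,
 * and xaq_lmt = 94 - 2*3 - 16 = 72.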
*/ - xaq->xaq_lmt = - xaq->nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT) - SSO_XAQ_SLACK; + xaq->xaq_lmt = xaq->nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT) - SSO_XAQ_SLACK; return 0; npa_fill_fail: diff --git a/drivers/common/cnxk/roc_sso.h b/drivers/common/cnxk/roc_sso.h index a2bb6fcb22d..64f14b8119c 100644 --- a/drivers/common/cnxk/roc_sso.h +++ b/drivers/common/cnxk/roc_sso.h @@ -84,10 +84,10 @@ int __roc_api roc_sso_hwgrp_set_priority(struct roc_sso *roc_sso, uint16_t hwgrp, uint8_t weight, uint8_t affinity, uint8_t priority); uint64_t __roc_api roc_sso_ns_to_gw(struct roc_sso *roc_sso, uint64_t ns); -int __roc_api roc_sso_hws_link(struct roc_sso *roc_sso, uint8_t hws, - uint16_t hwgrp[], uint16_t nb_hwgrp); -int __roc_api roc_sso_hws_unlink(struct roc_sso *roc_sso, uint8_t hws, - uint16_t hwgrp[], uint16_t nb_hwgrp); +int __roc_api roc_sso_hws_link(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[], + uint16_t nb_hwgrp, uint8_t set); +int __roc_api roc_sso_hws_unlink(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[], + uint16_t nb_hwgrp, uint8_t set); int __roc_api roc_sso_hwgrp_hws_link_status(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp); uintptr_t __roc_api roc_sso_hws_base_get(struct roc_sso *roc_sso, uint8_t hws); @@ -100,6 +100,8 @@ int __roc_api roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso, int __roc_api roc_sso_hwgrp_stash_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_stash *stash, uint16_t nb_stash); +void __roc_api roc_sso_hws_gwc_invalidate(struct roc_sso *roc_sso, uint8_t *hws, + uint8_t nb_hws); /* Debug */ void __roc_api roc_sso_dump(struct roc_sso *roc_sso, uint8_t nb_hws, diff --git a/drivers/common/cnxk/roc_sso_dp.h b/drivers/common/cnxk/roc_sso_dp.h index 9d30286d2f8..a860340c8fe 100644 --- a/drivers/common/cnxk/roc_sso_dp.h +++ b/drivers/common/cnxk/roc_sso_dp.h @@ -13,13 +13,13 @@ roc_sso_hws_head_wait(uintptr_t base) #if defined(__aarch64__) asm volatile(PLT_CPU_FEATURE_PREAMBLE - " ldr %[tag], [%[tag_op]] \n" - " tbnz %[tag], 35, done%= \n" + " ldr %[tag], [%[tag_op]] \n" + " tbnz %[tag], 35, .Ldone%= \n" " sevl \n" - "rty%=: wfe \n" - " ldr %[tag], [%[tag_op]] \n" - " tbz %[tag], 35, rty%= \n" - "done%=: \n" + ".Lrty%=: wfe \n" + " ldr %[tag], [%[tag_op]] \n" + " tbz %[tag], 35, .Lrty%= \n" + ".Ldone%=: \n" : [tag] "=&r"(tag) : [tag_op] "r"(tag_op)); #else @@ -30,4 +30,12 @@ roc_sso_hws_head_wait(uintptr_t base) return tag; } +static __plt_always_inline uint8_t +roc_sso_hws_is_head(uintptr_t base) +{ + uintptr_t tag_op = base + SSOW_LF_GWS_TAG; + + return !!(plt_read64(tag_op) & BIT_ULL(35)); +} + #endif /* _ROC_SSO_DP_H_ */ diff --git a/drivers/common/cnxk/roc_sso_priv.h b/drivers/common/cnxk/roc_sso_priv.h index 09729d4f628..21c59c57e65 100644 --- a/drivers/common/cnxk/roc_sso_priv.h +++ b/drivers/common/cnxk/roc_sso_priv.h @@ -44,8 +44,8 @@ roc_sso_to_sso_priv(struct roc_sso *roc_sso) int sso_lf_alloc(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf, void **rsp); int sso_lf_free(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf); -void sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp, - uint16_t hwgrp[], uint16_t n, uint16_t enable); +void sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp, uint16_t hwgrp[], + uint16_t n, uint8_t set, uint16_t enable); int sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps); int sso_hwgrp_release_xaq(struct dev *dev, uint16_t hwgrps); int sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq, diff --git 
a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map index 8c71497df8c..fffd2064bed 100644 --- a/drivers/common/cnxk/version.map +++ b/drivers/common/cnxk/version.map @@ -7,6 +7,7 @@ INTERNAL { cnxk_ipsec_outb_roundup_byte; cnxk_logtype_base; cnxk_logtype_cpt; + cnxk_logtype_dpi; cnxk_logtype_mbox; cnxk_logtype_ml; cnxk_logtype_nix; @@ -170,6 +171,7 @@ INTERNAL { roc_mcs_rx_sc_sa_map_write; roc_mcs_sa_policy_read; roc_mcs_sa_policy_write; + roc_mcs_sa_port_map_update; roc_mcs_sc_stats_get; roc_mcs_secy_policy_read; roc_mcs_secy_policy_write; @@ -214,6 +216,7 @@ INTERNAL { roc_nix_get_base_chan; roc_nix_get_pf; roc_nix_get_pf_func; + roc_nix_get_rx_chan_cnt; roc_nix_get_vf; roc_nix_get_vwqe_interval; roc_nix_inl_cb_register; @@ -379,6 +382,7 @@ INTERNAL { roc_nix_tm_node_suspend_resume; roc_nix_tm_prealloc_res; roc_nix_tm_pfc_prepare_tree; + roc_nix_tm_pfc_rlimit_sq; roc_nix_tm_prepare_rate_limited_tree; roc_nix_tm_rlimit_sq; roc_nix_tm_root_has_sp; @@ -425,6 +429,7 @@ INTERNAL { roc_npa_pool_op_range_set; roc_npa_pool_range_update_check; roc_npa_zero_aura_handle; + roc_npc_aged_flow_ctx_get; roc_npc_fini; roc_npc_flow_create; roc_npc_flow_destroy; @@ -457,8 +462,10 @@ INTERNAL { roc_npc_validate_portid_action; roc_ot_ipsec_inb_sa_init; roc_ot_ipsec_outb_sa_init; + roc_plt_control_lmt_id_get; roc_plt_init; roc_plt_init_cb_register; + roc_plt_lmt_validate; roc_sso_dev_fini; roc_sso_dev_init; roc_sso_dump; @@ -475,6 +482,7 @@ INTERNAL { roc_sso_hws_base_get; roc_sso_hws_link; roc_sso_hws_stats_get; + roc_sso_hws_gwc_invalidate; roc_sso_hws_unlink; roc_sso_ns_to_gw; roc_sso_rsrc_fini; diff --git a/drivers/common/cpt/cpt_mcode_defines.h b/drivers/common/cpt/cpt_mcode_defines.h index e6dcb7674ca..b337dbc68d6 100644 --- a/drivers/common/cpt/cpt_mcode_defines.h +++ b/drivers/common/cpt/cpt_mcode_defines.h @@ -364,6 +364,24 @@ struct cpt_ec_group { struct cpt_asym_ec_ctx { /* Prime length defined by microcode for EC operations */ uint8_t curveid; + + /* Private key */ + struct { + uint8_t data[66]; + unsigned int length; + } pkey; + + /* Public key */ + struct { + struct { + uint8_t data[66]; + unsigned int length; + } x; + struct { + uint8_t data[66]; + unsigned int length; + } y; + } q; }; struct cpt_asym_sess_misc { diff --git a/drivers/common/cpt/cpt_ucode_asym.h b/drivers/common/cpt/cpt_ucode_asym.h index 1105a0c1250..e1034bbeb4f 100644 --- a/drivers/common/cpt/cpt_ucode_asym.h +++ b/drivers/common/cpt/cpt_ucode_asym.h @@ -633,12 +633,13 @@ static __rte_always_inline void cpt_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa, struct asym_op_params *ecdsa_params, uint64_t fpm_table_iova, - uint8_t curveid) + struct cpt_asym_sess_misc *sess) { struct cpt_request_info *req = ecdsa_params->req; uint16_t message_len = ecdsa->message.length; phys_addr_t mphys = ecdsa_params->meta_buf; - uint16_t pkey_len = ecdsa->pkey.length; + uint16_t pkey_len = sess->ec_ctx.pkey.length; + uint8_t curveid = sess->ec_ctx.curveid; uint16_t p_align, k_align, m_align; uint16_t k_len = ecdsa->k.length; uint16_t order_len, prime_len; @@ -688,7 +689,7 @@ cpt_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa, memcpy(dptr + o_offset, ec_grp[curveid].order.data, order_len); dptr += p_align; - memcpy(dptr + pk_offset, ecdsa->pkey.data, pkey_len); + memcpy(dptr + pk_offset, sess->ec_ctx.pkey.data, pkey_len); dptr += p_align; memcpy(dptr, ecdsa->message.data, message_len); @@ -735,14 +736,15 @@ static __rte_always_inline void cpt_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa, struct 
asym_op_params *ecdsa_params, uint64_t fpm_table_iova, - uint8_t curveid) + struct cpt_asym_sess_misc *sess) { struct cpt_request_info *req = ecdsa_params->req; uint32_t message_len = ecdsa->message.length; phys_addr_t mphys = ecdsa_params->meta_buf; + uint16_t qx_len = sess->ec_ctx.q.x.length; + uint16_t qy_len = sess->ec_ctx.q.y.length; + uint8_t curveid = sess->ec_ctx.curveid; uint16_t o_offset, r_offset, s_offset; - uint16_t qx_len = ecdsa->q.x.length; - uint16_t qy_len = ecdsa->q.y.length; uint16_t r_len = ecdsa->r.length; uint16_t s_len = ecdsa->s.length; uint16_t order_len, prime_len; @@ -802,10 +804,10 @@ cpt_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa, memcpy(dptr, ec_grp[curveid].prime.data, prime_len); dptr += p_align; - memcpy(dptr + qx_offset, ecdsa->q.x.data, qx_len); + memcpy(dptr + qx_offset, sess->ec_ctx.q.x.data, qx_len); dptr += p_align; - memcpy(dptr + qy_offset, ecdsa->q.y.data, qy_len); + memcpy(dptr + qy_offset, sess->ec_ctx.q.y.data, qy_len); dptr += p_align; memcpy(dptr, ec_grp[curveid].consta.data, prime_len); @@ -852,10 +854,10 @@ cpt_enqueue_ecdsa_op(struct rte_crypto_op *op, uint8_t curveid = sess->ec_ctx.curveid; if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_SIGN) - cpt_ecdsa_sign_prep(ecdsa, params, fpm_iova[curveid], curveid); + cpt_ecdsa_sign_prep(ecdsa, params, fpm_iova[curveid], sess); else if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY) cpt_ecdsa_verify_prep(ecdsa, params, fpm_iova[curveid], - curveid); + sess); else { op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; return -EINVAL; diff --git a/drivers/common/dpaax/caamflib/desc.h b/drivers/common/dpaax/caamflib/desc.h index 635d6bad071..4a1285c4d48 100644 --- a/drivers/common/dpaax/caamflib/desc.h +++ b/drivers/common/dpaax/caamflib/desc.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2008-2016 Freescale Semiconductor Inc. - * Copyright 2016, 2019 NXP + * Copyright 2016, 2019, 2023 NXP * */ @@ -662,6 +662,9 @@ extern enum rta_sec_era rta_sec_era; #define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c #define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d #define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e +#define OP_PCL_IPSEC_HMAC_SHA2_224_96 0x00f2 +#define OP_PCL_IPSEC_HMAC_SHA2_224_112 0x00f4 +#define OP_PCL_IPSEC_HMAC_SHA2_224_224 0x00f8 /* For SRTP - OP_PCLID_SRTP */ #define OP_PCL_SRTP_CIPHER_MASK 0xff00 diff --git a/drivers/common/dpaax/caamflib/desc/ipsec.h b/drivers/common/dpaax/caamflib/desc/ipsec.h index 8ec6aac915b..95fc3ea5ba3 100644 --- a/drivers/common/dpaax/caamflib/desc/ipsec.h +++ b/drivers/common/dpaax/caamflib/desc/ipsec.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2008-2016 Freescale Semiconductor Inc. - * Copyright 2016,2019-2020 NXP + * Copyright 2016,2019-2022 NXP * */ @@ -710,6 +710,11 @@ static inline void __gen_auth_key(struct program *program, case OP_PCL_IPSEC_HMAC_SHA2_512_256: dkp_protid = OP_PCLID_DKP_SHA512; break; + case OP_PCL_IPSEC_HMAC_SHA2_224_96: + case OP_PCL_IPSEC_HMAC_SHA2_224_112: + case OP_PCL_IPSEC_HMAC_SHA2_224_224: + dkp_protid = OP_PCLID_DKP_SHA224; + break; default: KEY(program, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen, INLINE_KEY(authdata)); @@ -1380,7 +1385,7 @@ cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps, * layers to determine whether keys can be inlined or not. To be used as first * parameter of rta_inline_query(). 
*/ -#define IPSEC_AUTH_VAR_BASE_DESC_LEN (27 * CAAM_CMD_SZ) +#define IPSEC_AUTH_VAR_BASE_DESC_LEN (31 * CAAM_CMD_SZ) /** * IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN - IPsec AES decap shared descriptor diff --git a/drivers/common/dpaax/caamflib/desc/pdcp.h b/drivers/common/dpaax/caamflib/desc/pdcp.h index 289ee2a7d58..7d16c66d791 100644 --- a/drivers/common/dpaax/caamflib/desc/pdcp.h +++ b/drivers/common/dpaax/caamflib/desc/pdcp.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause or GPL-2.0+ * Copyright 2008-2013 Freescale Semiconductor, Inc. - * Copyright 2019-2022 NXP + * Copyright 2019-2023 NXP */ #ifndef __DESC_PDCP_H__ @@ -2338,27 +2338,27 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf, desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = { { /* NULL */ SHR_WAIT, /* NULL */ - SHR_ALWAYS, /* SNOW f9 */ - SHR_ALWAYS, /* AES CMAC */ - SHR_ALWAYS /* ZUC-I */ + SHR_WAIT, /* SNOW f9 */ + SHR_WAIT, /* AES CMAC */ + SHR_WAIT /* ZUC-I */ }, { /* SNOW f8 */ - SHR_ALWAYS, /* NULL */ - SHR_ALWAYS, /* SNOW f9 */ + SHR_WAIT, /* NULL */ + SHR_WAIT, /* SNOW f9 */ SHR_WAIT, /* AES CMAC */ SHR_WAIT /* ZUC-I */ }, { /* AES CTR */ - SHR_ALWAYS, /* NULL */ - SHR_ALWAYS, /* SNOW f9 */ - SHR_ALWAYS, /* AES CMAC */ + SHR_WAIT, /* NULL */ + SHR_WAIT, /* SNOW f9 */ + SHR_WAIT, /* AES CMAC */ SHR_WAIT /* ZUC-I */ }, { /* ZUC-E */ - SHR_ALWAYS, /* NULL */ + SHR_WAIT, /* NULL */ SHR_WAIT, /* SNOW f9 */ SHR_WAIT, /* AES CMAC */ - SHR_ALWAYS /* ZUC-I */ + SHR_WAIT /* ZUC-I */ }, }; enum pdb_type_e pdb_type; @@ -2478,27 +2478,27 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf, desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = { { /* NULL */ SHR_WAIT, /* NULL */ - SHR_ALWAYS, /* SNOW f9 */ - SHR_ALWAYS, /* AES CMAC */ - SHR_ALWAYS /* ZUC-I */ + SHR_WAIT, /* SNOW f9 */ + SHR_WAIT, /* AES CMAC */ + SHR_WAIT /* ZUC-I */ }, { /* SNOW f8 */ - SHR_ALWAYS, /* NULL */ - SHR_ALWAYS, /* SNOW f9 */ + SHR_WAIT, /* NULL */ + SHR_WAIT, /* SNOW f9 */ SHR_WAIT, /* AES CMAC */ SHR_WAIT /* ZUC-I */ }, { /* AES CTR */ - SHR_ALWAYS, /* NULL */ - SHR_ALWAYS, /* SNOW f9 */ - SHR_ALWAYS, /* AES CMAC */ + SHR_WAIT, /* NULL */ + SHR_WAIT, /* SNOW f9 */ + SHR_WAIT, /* AES CMAC */ SHR_WAIT /* ZUC-I */ }, { /* ZUC-E */ - SHR_ALWAYS, /* NULL */ + SHR_WAIT, /* NULL */ SHR_WAIT, /* SNOW f9 */ SHR_WAIT, /* AES CMAC */ - SHR_ALWAYS /* ZUC-I */ + SHR_WAIT /* ZUC-I */ }, }; enum pdb_type_e pdb_type; @@ -2643,24 +2643,24 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf, desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = { { /* NULL */ SHR_WAIT, /* NULL */ - SHR_ALWAYS, /* SNOW f9 */ - SHR_ALWAYS, /* AES CMAC */ - SHR_ALWAYS /* ZUC-I */ + SHR_WAIT, /* SNOW f9 */ + SHR_WAIT, /* AES CMAC */ + SHR_WAIT /* ZUC-I */ }, { /* SNOW f8 */ - SHR_ALWAYS, /* NULL */ - SHR_ALWAYS, /* SNOW f9 */ + SHR_WAIT, /* NULL */ + SHR_WAIT, /* SNOW f9 */ SHR_WAIT, /* AES CMAC */ SHR_WAIT /* ZUC-I */ }, { /* AES CTR */ - SHR_ALWAYS, /* NULL */ - SHR_ALWAYS, /* SNOW f9 */ - SHR_ALWAYS, /* AES CMAC */ + SHR_WAIT, /* NULL */ + SHR_WAIT, /* SNOW f9 */ + SHR_WAIT, /* AES CMAC */ SHR_WAIT /* ZUC-I */ }, { /* ZUC-E */ - SHR_ALWAYS, /* NULL */ + SHR_WAIT, /* NULL */ SHR_WAIT, /* SNOW f9 */ SHR_WAIT, /* AES CMAC */ SHR_WAIT /* ZUC-I */ @@ -2677,7 +2677,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf, if (authdata) SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype], 0, 0); else - SHR_HDR(p, SHR_ALWAYS, 0, 0); + SHR_HDR(p, SHR_WAIT, 0, 0); pdb_type = cnstr_pdcp_u_plane_pdb(p, sn_size, hfn, bearer, direction, hfn_threshold, 
cipherdata, authdata); @@ -2828,24 +2828,24 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf, desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = { { /* NULL */ SHR_WAIT, /* NULL */ - SHR_ALWAYS, /* SNOW f9 */ - SHR_ALWAYS, /* AES CMAC */ - SHR_ALWAYS /* ZUC-I */ + SHR_WAIT, /* SNOW f9 */ + SHR_WAIT, /* AES CMAC */ + SHR_WAIT /* ZUC-I */ }, { /* SNOW f8 */ - SHR_ALWAYS, /* NULL */ - SHR_ALWAYS, /* SNOW f9 */ + SHR_WAIT, /* NULL */ + SHR_WAIT, /* SNOW f9 */ SHR_WAIT, /* AES CMAC */ SHR_WAIT /* ZUC-I */ }, { /* AES CTR */ - SHR_ALWAYS, /* NULL */ - SHR_ALWAYS, /* SNOW f9 */ - SHR_ALWAYS, /* AES CMAC */ + SHR_WAIT, /* NULL */ + SHR_WAIT, /* SNOW f9 */ + SHR_WAIT, /* AES CMAC */ SHR_WAIT /* ZUC-I */ }, { /* ZUC-E */ - SHR_ALWAYS, /* NULL */ + SHR_WAIT, /* NULL */ SHR_WAIT, /* SNOW f9 */ SHR_WAIT, /* AES CMAC */ SHR_WAIT /* ZUC-I */ @@ -2862,7 +2862,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf, if (authdata) SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype], 0, 0); else - SHR_HDR(p, SHR_ALWAYS, 0, 0); + SHR_HDR(p, SHR_WAIT, 0, 0); pdb_type = cnstr_pdcp_u_plane_pdb(p, sn_size, hfn, bearer, direction, hfn_threshold, diff --git a/drivers/common/dpaax/caamflib/rta/protocol_cmd.h b/drivers/common/dpaax/caamflib/rta/protocol_cmd.h index ac5c8af716a..5b33f103bea 100644 --- a/drivers/common/dpaax/caamflib/rta/protocol_cmd.h +++ b/drivers/common/dpaax/caamflib/rta/protocol_cmd.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2008-2016 Freescale Semiconductor Inc. - * Copyright 2016,2019 NXP + * Copyright 2016,2019,2023 NXP * */ @@ -241,6 +241,9 @@ __rta_ipsec_proto(uint16_t protoinfo) case OP_PCL_IPSEC_HMAC_MD5_128: case OP_PCL_IPSEC_HMAC_SHA1_160: case OP_PCL_IPSEC_AES_CMAC_96: + case OP_PCL_IPSEC_HMAC_SHA2_224_96: + case OP_PCL_IPSEC_HMAC_SHA2_224_112: + case OP_PCL_IPSEC_HMAC_SHA2_224_224: case OP_PCL_IPSEC_HMAC_SHA2_256_128: case OP_PCL_IPSEC_HMAC_SHA2_384_192: case OP_PCL_IPSEC_HMAC_SHA2_512_256: diff --git a/drivers/common/dpaax/caamflib/rta/sec_run_time_asm.h b/drivers/common/dpaax/caamflib/rta/sec_run_time_asm.h index f40eaadea3b..5c2efeb2c59 100644 --- a/drivers/common/dpaax/caamflib/rta/sec_run_time_asm.h +++ b/drivers/common/dpaax/caamflib/rta/sec_run_time_asm.h @@ -413,7 +413,7 @@ rta_program_finalize(struct program *program) { /* Descriptor is usually not allowed to go beyond 64 words size */ if (program->current_pc > MAX_CAAM_DESCSIZE) - pr_warn("Descriptor Size exceeded max limit of 64 words\n"); + pr_debug("Descriptor Size exceeded max limit of 64 words"); /* Descriptor is erroneous */ if (program->first_error_pc) { diff --git a/drivers/common/idpf/base/README b/drivers/common/idpf/base/README index 693049c0570..ff26f736eca 100644 --- a/drivers/common/idpf/base/README +++ b/drivers/common/idpf/base/README @@ -6,7 +6,7 @@ Intel® IDPF driver ================== This directory contains source code of BSD-3-Clause idpf driver of version -2023.02.23 released by the team which develops basic drivers for Intel IPU. +2023.07.25 released by the team which develops basic drivers for Intel IPU. The directory of base/ contains the original source package. 
This driver is valid for the product(s) listed below diff --git a/drivers/common/idpf/base/idpf_common.c b/drivers/common/idpf/base/idpf_common.c index fbf71416fd5..7181a7f14c7 100644 --- a/drivers/common/idpf/base/idpf_common.c +++ b/drivers/common/idpf/base/idpf_common.c @@ -239,8 +239,10 @@ int idpf_clean_arq_element(struct idpf_hw *hw, e->desc.ret_val = msg.status; e->desc.datalen = msg.data_len; if (msg.data_len > 0) { - if (!msg.ctx.indirect.payload) - return -EINVAL; + if (!msg.ctx.indirect.payload || !msg.ctx.indirect.payload->va || + !e->msg_buf) { + return -EFAULT; + } e->buf_len = msg.data_len; msg_data_len = msg.data_len; idpf_memcpy(e->msg_buf, msg.ctx.indirect.payload->va, msg_data_len, @@ -260,12 +262,12 @@ int idpf_clean_arq_element(struct idpf_hw *hw, * idpf_deinit_hw - shutdown routine * @hw: pointer to the hardware structure */ -int idpf_deinit_hw(struct idpf_hw *hw) +void idpf_deinit_hw(struct idpf_hw *hw) { hw->asq = NULL; hw->arq = NULL; - return idpf_ctlq_deinit(hw); + idpf_ctlq_deinit(hw); } /** diff --git a/drivers/common/idpf/base/idpf_controlq.c b/drivers/common/idpf/base/idpf_controlq.c index 6815153e1d8..a82ca628de4 100644 --- a/drivers/common/idpf/base/idpf_controlq.c +++ b/drivers/common/idpf/base/idpf_controlq.c @@ -9,11 +9,10 @@ * @cq: pointer to the specific control queue * @q_create_info: structs containing info for each queue to be initialized */ -static void -idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq, - struct idpf_ctlq_create_info *q_create_info) +static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq, + struct idpf_ctlq_create_info *q_create_info) { - /* set head and tail registers in our local struct */ + /* set control queue registers in our local struct */ cq->reg.head = q_create_info->reg.head; cq->reg.tail = q_create_info->reg.tail; cq->reg.len = q_create_info->reg.len; @@ -75,7 +74,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq) desc->flags = CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD); desc->opcode = 0; - desc->datalen = (__le16)CPU_TO_LE16(bi->size); + desc->datalen = CPU_TO_LE16(bi->size); desc->ret_val = 0; desc->cookie_high = 0; desc->cookie_low = 0; @@ -137,6 +136,7 @@ int idpf_ctlq_add(struct idpf_hw *hw, struct idpf_ctlq_create_info *qinfo, struct idpf_ctlq_info **cq_out) { + struct idpf_ctlq_info *cq; bool is_rxq = false; int status = 0; @@ -145,26 +145,26 @@ int idpf_ctlq_add(struct idpf_hw *hw, qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN) return -EINVAL; - *cq_out = (struct idpf_ctlq_info *) - idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info)); - if (!(*cq_out)) + cq = (struct idpf_ctlq_info *) + idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info)); + if (!cq) return -ENOMEM; - (*cq_out)->cq_type = qinfo->type; - (*cq_out)->q_id = qinfo->id; - (*cq_out)->buf_size = qinfo->buf_size; - (*cq_out)->ring_size = qinfo->len; + cq->cq_type = qinfo->type; + cq->q_id = qinfo->id; + cq->buf_size = qinfo->buf_size; + cq->ring_size = qinfo->len; - (*cq_out)->next_to_use = 0; - (*cq_out)->next_to_clean = 0; - (*cq_out)->next_to_post = (*cq_out)->ring_size - 1; + cq->next_to_use = 0; + cq->next_to_clean = 0; + cq->next_to_post = cq->ring_size - 1; switch (qinfo->type) { case IDPF_CTLQ_TYPE_MAILBOX_RX: is_rxq = true; /* fallthrough */ case IDPF_CTLQ_TYPE_MAILBOX_TX: - status = idpf_ctlq_alloc_ring_res(hw, *cq_out); + status = idpf_ctlq_alloc_ring_res(hw, cq); break; default: status = -EINVAL; @@ -175,33 +175,35 @@ int idpf_ctlq_add(struct idpf_hw *hw, goto init_free_q; if (is_rxq) { - idpf_ctlq_init_rxq_bufs(*cq_out); + 
idpf_ctlq_init_rxq_bufs(cq); } else { /* Allocate the array of msg pointers for TX queues */ - (*cq_out)->bi.tx_msg = (struct idpf_ctlq_msg **) + cq->bi.tx_msg = (struct idpf_ctlq_msg **) idpf_calloc(hw, qinfo->len, sizeof(struct idpf_ctlq_msg *)); - if (!(*cq_out)->bi.tx_msg) { + if (!cq->bi.tx_msg) { status = -ENOMEM; goto init_dealloc_q_mem; } } - idpf_ctlq_setup_regs(*cq_out, qinfo); + idpf_ctlq_setup_regs(cq, qinfo); - idpf_ctlq_init_regs(hw, *cq_out, is_rxq); + idpf_ctlq_init_regs(hw, cq, is_rxq); - idpf_init_lock(&(*cq_out)->cq_lock); + idpf_init_lock(&(cq->cq_lock)); - LIST_INSERT_HEAD(&hw->cq_list_head, (*cq_out), cq_list); + LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list); + *cq_out = cq; return status; init_dealloc_q_mem: /* free ring buffers and the ring itself */ - idpf_ctlq_dealloc_ring_res(hw, *cq_out); + idpf_ctlq_dealloc_ring_res(hw, cq); init_free_q: - idpf_free(hw, *cq_out); + idpf_free(hw, cq); + cq = NULL; return status; } @@ -261,16 +263,13 @@ int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q, * idpf_ctlq_deinit - destroy all control queues * @hw: pointer to hw struct */ -int idpf_ctlq_deinit(struct idpf_hw *hw) +void idpf_ctlq_deinit(struct idpf_hw *hw) { struct idpf_ctlq_info *cq = NULL, *tmp = NULL; - int ret_code = 0; LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head, idpf_ctlq_info, cq_list) idpf_ctlq_remove(hw, cq); - - return ret_code; } /** @@ -426,11 +425,8 @@ static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD)) break; - desc_err = LE16_TO_CPU(desc->ret_val); - if (desc_err) { - /* strip off FW internal code */ - desc_err &= 0xff; - } + /* strip off FW internal code */ + desc_err = LE16_TO_CPU(desc->ret_val) & 0xff; msg_status[i] = cq->bi.tx_msg[ntc]; if (!msg_status[i]) diff --git a/drivers/common/idpf/base/idpf_controlq_api.h b/drivers/common/idpf/base/idpf_controlq_api.h index 37803042566..38f5d2df3cb 100644 --- a/drivers/common/idpf/base/idpf_controlq_api.h +++ b/drivers/common/idpf/base/idpf_controlq_api.h @@ -21,10 +21,7 @@ enum idpf_ctlq_type { IDPF_CTLQ_TYPE_RDMA_COMPL = 7 }; -/* - * Generic Control Queue Structures - */ - +/* Generic Control Queue Structures */ struct idpf_ctlq_reg { /* used for queue tracking */ u32 head; @@ -77,6 +74,11 @@ struct idpf_ctlq_msg { u8 context[IDPF_INDIRECT_CTX_SIZE]; struct idpf_dma_mem *payload; } indirect; + struct { + u32 rsvd; + u16 data; + u16 flags; + } sw_cookie; } ctx; }; @@ -152,10 +154,7 @@ enum idpf_mbx_opc { idpf_mbq_opc_send_msg_to_peer_drv = 0x0804, }; -/* - * API supported for control queue management - */ - +/* API supported for control queue management */ /* Will init all required q including default mb. "q_info" is an array of * create_info structs equal to the number of control queues to be created. 
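 * As an illustrative call (queue id, ring length and buffer size below are
 * placeholders, and each entry's .reg head/tail/len offsets, omitted here,
 * must also be filled in before use):
 *
 *   struct idpf_ctlq_create_info q_info[2] = {
 *           { .type = IDPF_CTLQ_TYPE_MAILBOX_TX, .id = 0,
 *             .len = 64, .buf_size = 4096 },
 *           { .type = IDPF_CTLQ_TYPE_MAILBOX_RX, .id = 0,
 *             .len = 64, .buf_size = 4096 },
 *   };
 *
 *   err = idpf_ctlq_init(hw, 2, q_info);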
*/ @@ -200,6 +199,6 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_dma_mem **buffs); /* Will destroy all q including the default mb */ -int idpf_ctlq_deinit(struct idpf_hw *hw); +void idpf_ctlq_deinit(struct idpf_hw *hw); #endif /* _IDPF_CONTROLQ_API_H_ */ diff --git a/drivers/common/idpf/base/idpf_controlq_setup.c b/drivers/common/idpf/base/idpf_controlq_setup.c index 0f1b52a7e9a..21f43c74f57 100644 --- a/drivers/common/idpf/base/idpf_controlq_setup.c +++ b/drivers/common/idpf/base/idpf_controlq_setup.c @@ -11,9 +11,8 @@ * @hw: pointer to hw struct * @cq: pointer to the specific Control queue */ -static int -idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw, - struct idpf_ctlq_info *cq) +static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw, + struct idpf_ctlq_info *cq) { size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc); diff --git a/drivers/common/idpf/base/idpf_lan_pf_regs.h b/drivers/common/idpf/base/idpf_lan_pf_regs.h index 8542620e011..a51e39a5020 100644 --- a/drivers/common/idpf/base/idpf_lan_pf_regs.h +++ b/drivers/common/idpf/base/idpf_lan_pf_regs.h @@ -24,7 +24,7 @@ #define PF_FW_ARQBAH (PF_FW_BASE + 0x4) #define PF_FW_ARQLEN (PF_FW_BASE + 0x8) #define PF_FW_ARQLEN_ARQLEN_S 0 -#define PF_FW_ARQLEN_ARQLEN_M IDPF_M(0x1FFF, PF_FW_ARQLEN_ARQLEN_S) +#define PF_FW_ARQLEN_ARQLEN_M GENMASK(12, 0) #define PF_FW_ARQLEN_ARQVFE_S 28 #define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S) #define PF_FW_ARQLEN_ARQOVFL_S 29 @@ -35,14 +35,14 @@ #define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S) #define PF_FW_ARQH (PF_FW_BASE + 0xC) #define PF_FW_ARQH_ARQH_S 0 -#define PF_FW_ARQH_ARQH_M IDPF_M(0x1FFF, PF_FW_ARQH_ARQH_S) +#define PF_FW_ARQH_ARQH_M GENMASK(12, 0) #define PF_FW_ARQT (PF_FW_BASE + 0x10) #define PF_FW_ATQBAL (PF_FW_BASE + 0x14) #define PF_FW_ATQBAH (PF_FW_BASE + 0x18) #define PF_FW_ATQLEN (PF_FW_BASE + 0x1C) #define PF_FW_ATQLEN_ATQLEN_S 0 -#define PF_FW_ATQLEN_ATQLEN_M IDPF_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S) +#define PF_FW_ATQLEN_ATQLEN_M GENMASK(9, 0) #define PF_FW_ATQLEN_ATQVFE_S 28 #define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S) #define PF_FW_ATQLEN_ATQOVFL_S 29 @@ -53,7 +53,7 @@ #define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S) #define PF_FW_ATQH (PF_FW_BASE + 0x20) #define PF_FW_ATQH_ATQH_S 0 -#define PF_FW_ATQH_ATQH_M IDPF_M(0x3FF, PF_FW_ATQH_ATQH_S) +#define PF_FW_ATQH_ATQH_M GENMASK(9, 0) #define PF_FW_ATQT (PF_FW_BASE + 0x24) /* Interrupts */ @@ -66,7 +66,7 @@ #define PF_GLINT_DYN_CTL_SWINT_TRIG_S 2 #define PF_GLINT_DYN_CTL_SWINT_TRIG_M BIT(PF_GLINT_DYN_CTL_SWINT_TRIG_S) #define PF_GLINT_DYN_CTL_ITR_INDX_S 3 -#define PF_GLINT_DYN_CTL_ITR_INDX_M IDPF_M(0x3, PF_GLINT_DYN_CTL_ITR_INDX_S) +#define PF_GLINT_DYN_CTL_ITR_INDX_M GENMASK(4, 3) #define PF_GLINT_DYN_CTL_INTERVAL_S 5 #define PF_GLINT_DYN_CTL_INTERVAL_M BIT(PF_GLINT_DYN_CTL_INTERVAL_S) #define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S 24 @@ -80,19 +80,20 @@ /* _ITR is ITR index, _INT is interrupt index, _itrn_indx_spacing is * spacing b/w itrn registers of the same vector. 
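 * For example, PF_GLINT_ITR(1, 3) defined just below expands to
 * PF_GLINT_BASE + (1 + 1) * 4 + 3 * 0x1000 = PF_GLINT_BASE + 0x3008,
 * i.e. the second ITR register of interrupt vector 3.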
*/ -#define PF_GLINT_ITR_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \ - ((_reg_start) + (((_ITR)) * (_itrn_indx_spacing))) +#define PF_GLINT_ITR_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \ + ((_reg_start) + ((_ITR) * (_itrn_indx_spacing))) /* For PF, itrn_indx_spacing is 4 and itrn_reg_spacing is 0x1000 */ -#define PF_GLINT_ITR(_ITR, _INT) (PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) * 0x1000)) +#define PF_GLINT_ITR(_ITR, _INT) \ + (PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) * 0x1000)) #define PF_GLINT_ITR_MAX_INDEX 2 #define PF_GLINT_ITR_INTERVAL_S 0 -#define PF_GLINT_ITR_INTERVAL_M IDPF_M(0xFFF, PF_GLINT_ITR_INTERVAL_S) +#define PF_GLINT_ITR_INTERVAL_M GENMASK(11, 0) /* Timesync registers */ #define PF_TIMESYNC_BASE 0x08404000 #define PF_GLTSYN_CMD_SYNC (PF_TIMESYNC_BASE) #define PF_GLTSYN_CMD_SYNC_EXEC_CMD_S 0 -#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M IDPF_M(0x3, PF_GLTSYN_CMD_SYNC_EXEC_CMD_S) +#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M GENMASK(1, 0) #define PF_GLTSYN_CMD_SYNC_SHTIME_EN_S 2 #define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M BIT(PF_GLTSYN_CMD_SYNC_SHTIME_EN_S) #define PF_GLTSYN_SHTIME_0 (PF_TIMESYNC_BASE + 0x4) @@ -104,23 +105,23 @@ /* Generic registers */ #define PF_INT_DIR_OICR_ENA 0x08406000 #define PF_INT_DIR_OICR_ENA_S 0 -#define PF_INT_DIR_OICR_ENA_M IDPF_M(0xFFFFFFFF, PF_INT_DIR_OICR_ENA_S) +#define PF_INT_DIR_OICR_ENA_M GENMASK(31, 0) #define PF_INT_DIR_OICR 0x08406004 #define PF_INT_DIR_OICR_TSYN_EVNT 0 #define PF_INT_DIR_OICR_PHY_TS_0 BIT(1) #define PF_INT_DIR_OICR_PHY_TS_1 BIT(2) #define PF_INT_DIR_OICR_CAUSE 0x08406008 #define PF_INT_DIR_OICR_CAUSE_CAUSE_S 0 -#define PF_INT_DIR_OICR_CAUSE_CAUSE_M IDPF_M(0xFFFFFFFF, PF_INT_DIR_OICR_CAUSE_CAUSE_S) +#define PF_INT_DIR_OICR_CAUSE_CAUSE_M GENMASK(31, 0) #define PF_INT_PBA_CLEAR 0x0840600C #define PF_FUNC_RID 0x08406010 #define PF_FUNC_RID_FUNCTION_NUMBER_S 0 -#define PF_FUNC_RID_FUNCTION_NUMBER_M IDPF_M(0x7, PF_FUNC_RID_FUNCTION_NUMBER_S) +#define PF_FUNC_RID_FUNCTION_NUMBER_M GENMASK(2, 0) #define PF_FUNC_RID_DEVICE_NUMBER_S 3 -#define PF_FUNC_RID_DEVICE_NUMBER_M IDPF_M(0x1F, PF_FUNC_RID_DEVICE_NUMBER_S) +#define PF_FUNC_RID_DEVICE_NUMBER_M GENMASK(7, 3) #define PF_FUNC_RID_BUS_NUMBER_S 8 -#define PF_FUNC_RID_BUS_NUMBER_M IDPF_M(0xFF, PF_FUNC_RID_BUS_NUMBER_S) +#define PF_FUNC_RID_BUS_NUMBER_M GENMASK(15, 8) /* Reset registers */ #define PFGEN_RTRIG 0x08407000 @@ -132,7 +133,7 @@ #define PFGEN_RTRIG_IMCR_M BIT(2) #define PFGEN_RSTAT 0x08407008 /* PFR Status */ #define PFGEN_RSTAT_PFR_STATE_S 0 -#define PFGEN_RSTAT_PFR_STATE_M IDPF_M(0x3, PFGEN_RSTAT_PFR_STATE_S) +#define PFGEN_RSTAT_PFR_STATE_M GENMASK(1, 0) #define PFGEN_CTRL 0x0840700C #define PFGEN_CTRL_PFSWR BIT(0) diff --git a/drivers/common/idpf/base/idpf_lan_txrx.h b/drivers/common/idpf/base/idpf_lan_txrx.h index 7b03693eb12..c9eaeb5d3f1 100644 --- a/drivers/common/idpf/base/idpf_lan_txrx.h +++ b/drivers/common/idpf/base/idpf_lan_txrx.h @@ -8,9 +8,9 @@ #include "idpf_osdep.h" enum idpf_rss_hash { - /* Values 0 - 28 are reserved for future use */ - IDPF_HASH_INVALID = 0, - IDPF_HASH_NONF_UNICAST_IPV4_UDP = 29, + IDPF_HASH_INVALID = 0, + /* Values 1 - 28 are reserved for future use */ + IDPF_HASH_NONF_UNICAST_IPV4_UDP = 29, IDPF_HASH_NONF_MULTICAST_IPV4_UDP, IDPF_HASH_NONF_IPV4_UDP, IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK, @@ -19,7 +19,7 @@ enum idpf_rss_hash { IDPF_HASH_NONF_IPV4_OTHER, IDPF_HASH_FRAG_IPV4, /* Values 37-38 are reserved */ - IDPF_HASH_NONF_UNICAST_IPV6_UDP = 39, + IDPF_HASH_NONF_UNICAST_IPV6_UDP = 39, IDPF_HASH_NONF_MULTICAST_IPV6_UDP, IDPF_HASH_NONF_IPV6_UDP, 
IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK, @@ -32,96 +32,81 @@ enum idpf_rss_hash { IDPF_HASH_NONF_FCOE_RX, IDPF_HASH_NONF_FCOE_OTHER, /* Values 51-62 are reserved */ - IDPF_HASH_L2_PAYLOAD = 63, + IDPF_HASH_L2_PAYLOAD = 63, IDPF_HASH_MAX }; /* Supported RSS offloads */ -#define IDPF_DEFAULT_RSS_HASH ( \ - BIT_ULL(IDPF_HASH_NONF_IPV4_UDP) | \ - BIT_ULL(IDPF_HASH_NONF_IPV4_SCTP) | \ - BIT_ULL(IDPF_HASH_NONF_IPV4_TCP) | \ - BIT_ULL(IDPF_HASH_NONF_IPV4_OTHER) | \ - BIT_ULL(IDPF_HASH_FRAG_IPV4) | \ - BIT_ULL(IDPF_HASH_NONF_IPV6_UDP) | \ - BIT_ULL(IDPF_HASH_NONF_IPV6_TCP) | \ - BIT_ULL(IDPF_HASH_NONF_IPV6_SCTP) | \ - BIT_ULL(IDPF_HASH_NONF_IPV6_OTHER) | \ - BIT_ULL(IDPF_HASH_FRAG_IPV6) | \ +#define IDPF_DEFAULT_RSS_HASH \ + (BIT_ULL(IDPF_HASH_NONF_IPV4_UDP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV4_SCTP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV4_TCP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV4_OTHER) | \ + BIT_ULL(IDPF_HASH_FRAG_IPV4) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_UDP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_TCP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_SCTP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_OTHER) | \ + BIT_ULL(IDPF_HASH_FRAG_IPV6) | \ BIT_ULL(IDPF_HASH_L2_PAYLOAD)) - /* TODO: Wrap below comment under internal flag - * Below 6 pcktypes are not supported by FVL or older products - * They are supported by FPK and future products - */ #define IDPF_DEFAULT_RSS_HASH_EXPANDED (IDPF_DEFAULT_RSS_HASH | \ - BIT_ULL(IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK) | \ - BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV4_UDP) | \ - BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV4_UDP) | \ - BIT_ULL(IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK) | \ - BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV6_UDP) | \ BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV6_UDP)) /* For idpf_splitq_base_tx_compl_desc */ -#define IDPF_TXD_COMPLQ_GEN_S 15 +#define IDPF_TXD_COMPLQ_GEN_S 15 #define IDPF_TXD_COMPLQ_GEN_M BIT_ULL(IDPF_TXD_COMPLQ_GEN_S) #define IDPF_TXD_COMPLQ_COMPL_TYPE_S 11 -#define IDPF_TXD_COMPLQ_COMPL_TYPE_M \ - IDPF_M(0x7UL, IDPF_TXD_COMPLQ_COMPL_TYPE_S) -#define IDPF_TXD_COMPLQ_QID_S 0 -#define IDPF_TXD_COMPLQ_QID_M IDPF_M(0x3FFUL, IDPF_TXD_COMPLQ_QID_S) +#define IDPF_TXD_COMPLQ_COMPL_TYPE_M GENMASK_ULL(13, 11) +#define IDPF_TXD_COMPLQ_QID_S 0 +#define IDPF_TXD_COMPLQ_QID_M GENMASK_ULL(9, 0) /* For base mode TX descriptors */ -#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S 23 -#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_M BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S) -#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_S 19 -#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_M \ - (0xFULL << IDPF_TXD_CTX_QW0_TUNN_DECTTL_S) -#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_S 12 -#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_M \ - (0X7FULL << IDPF_TXD_CTX_QW0_TUNN_NATLEN_S) +#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S 23 +#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_M \ + BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S) +#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_S 19 +#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_M GENMASK_ULL(22, 19) +#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_S 12 +#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_M GENMASK_ULL(18, 12) #define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S 11 -#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M \ +#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M \ BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S) #define IDPF_TXD_CTX_EIP_NOINC_IPID_CONST \ IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M -#define IDPF_TXD_CTX_QW0_TUNN_NATT_S 9 -#define IDPF_TXD_CTX_QW0_TUNN_NATT_M (0x3ULL << 
IDPF_TXD_CTX_QW0_TUNN_NATT_S) -#define IDPF_TXD_CTX_UDP_TUNNELING BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_NATT_S) -#define IDPF_TXD_CTX_GRE_TUNNELING (0x2ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S) +#define IDPF_TXD_CTX_QW0_TUNN_NATT_S 9 +#define IDPF_TXD_CTX_QW0_TUNN_NATT_M GENMASK_ULL(10, 9) +#define IDPF_TXD_CTX_UDP_TUNNELING BIT_ULL(9) +#define IDPF_TXD_CTX_GRE_TUNNELING BIT_ULL(10) #define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S 2 -#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_M \ - (0x3FULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S) -#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S 0 -#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_M \ - (0x3ULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S) - -#define IDPF_TXD_CTX_QW1_MSS_S 50 -#define IDPF_TXD_CTX_QW1_MSS_M \ - IDPF_M(0x3FFFULL, IDPF_TXD_CTX_QW1_MSS_S) -#define IDPF_TXD_CTX_QW1_TSO_LEN_S 30 -#define IDPF_TXD_CTX_QW1_TSO_LEN_M \ - IDPF_M(0x3FFFFULL, IDPF_TXD_CTX_QW1_TSO_LEN_S) -#define IDPF_TXD_CTX_QW1_CMD_S 4 -#define IDPF_TXD_CTX_QW1_CMD_M \ - IDPF_M(0xFFFUL, IDPF_TXD_CTX_QW1_CMD_S) -#define IDPF_TXD_CTX_QW1_DTYPE_S 0 -#define IDPF_TXD_CTX_QW1_DTYPE_M \ - IDPF_M(0xFUL, IDPF_TXD_CTX_QW1_DTYPE_S) -#define IDPF_TXD_QW1_L2TAG1_S 48 -#define IDPF_TXD_QW1_L2TAG1_M \ - IDPF_M(0xFFFFULL, IDPF_TXD_QW1_L2TAG1_S) -#define IDPF_TXD_QW1_TX_BUF_SZ_S 34 -#define IDPF_TXD_QW1_TX_BUF_SZ_M \ - IDPF_M(0x3FFFULL, IDPF_TXD_QW1_TX_BUF_SZ_S) -#define IDPF_TXD_QW1_OFFSET_S 16 -#define IDPF_TXD_QW1_OFFSET_M \ - IDPF_M(0x3FFFFULL, IDPF_TXD_QW1_OFFSET_S) -#define IDPF_TXD_QW1_CMD_S 4 -#define IDPF_TXD_QW1_CMD_M IDPF_M(0xFFFUL, IDPF_TXD_QW1_CMD_S) -#define IDPF_TXD_QW1_DTYPE_S 0 -#define IDPF_TXD_QW1_DTYPE_M IDPF_M(0xFUL, IDPF_TXD_QW1_DTYPE_S) +#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_M GENMASK_ULL(7, 2) +#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S 0 +#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_M GENMASK_ULL(1, 0) + +#define IDPF_TXD_CTX_QW1_MSS_S 50 +#define IDPF_TXD_CTX_QW1_MSS_M GENMASK_ULL(63, 50) +#define IDPF_TXD_CTX_QW1_TSO_LEN_S 30 +#define IDPF_TXD_CTX_QW1_TSO_LEN_M GENMASK_ULL(47, 30) +#define IDPF_TXD_CTX_QW1_CMD_S 4 +#define IDPF_TXD_CTX_QW1_CMD_M GENMASK_ULL(15, 4) +#define IDPF_TXD_CTX_QW1_DTYPE_S 0 +#define IDPF_TXD_CTX_QW1_DTYPE_M GENMASK_ULL(3, 0) +#define IDPF_TXD_QW1_L2TAG1_S 48 +#define IDPF_TXD_QW1_L2TAG1_M GENMASK_ULL(63, 48) +#define IDPF_TXD_QW1_TX_BUF_SZ_S 34 +#define IDPF_TXD_QW1_TX_BUF_SZ_M GENMASK_ULL(47, 34) +#define IDPF_TXD_QW1_OFFSET_S 16 +#define IDPF_TXD_QW1_OFFSET_M GENMASK_ULL(33, 16) +#define IDPF_TXD_QW1_CMD_S 4 +#define IDPF_TXD_QW1_CMD_M GENMASK_ULL(15, 4) +#define IDPF_TXD_QW1_DTYPE_S 0 +#define IDPF_TXD_QW1_DTYPE_M GENMASK_ULL(3, 0) /* TX Completion Descriptor Completion Types */ #define IDPF_TXD_COMPLT_ITR_FLUSH 0 @@ -134,19 +119,19 @@ enum idpf_rss_hash { enum idpf_tx_desc_dtype_value { IDPF_TX_DESC_DTYPE_DATA = 0, IDPF_TX_DESC_DTYPE_CTX = 1, - IDPF_TX_DESC_DTYPE_REINJECT_CTX = 2, - IDPF_TX_DESC_DTYPE_FLEX_DATA = 3, - IDPF_TX_DESC_DTYPE_FLEX_CTX = 4, + /* DTYPE 2 is reserved + * DTYPE 3 is free for future use + * DTYPE 4 is reserved + */ IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX = 5, - IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1 = 6, + /* DTYPE 6 is reserved */ IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 = 7, - IDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX = 8, - IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_TSO_CTX = 9, - IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_CTX = 10, - IDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX = 11, + /* DTYPE 8, 9 are free for future use + * DTYPE 10 is reserved + * DTYPE 11 is free for future use + */ IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE = 12, - IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_TSO_CTX = 13, - 
IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_CTX = 14, + /* DTYPE 13, 14 are free for future use */ /* DESC_DONE - HW has completed write-back of descriptor */ IDPF_TX_DESC_DTYPE_DESC_DONE = 15, }; @@ -172,10 +157,10 @@ enum idpf_tx_desc_len_fields { IDPF_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */ }; -#define IDPF_TXD_QW1_MACLEN_M IDPF_M(0x7FUL, IDPF_TX_DESC_LEN_MACLEN_S) -#define IDPF_TXD_QW1_IPLEN_M IDPF_M(0x7FUL, IDPF_TX_DESC_LEN_IPLEN_S) -#define IDPF_TXD_QW1_L4LEN_M IDPF_M(0xFUL, IDPF_TX_DESC_LEN_L4_LEN_S) -#define IDPF_TXD_QW1_FCLEN_M IDPF_M(0xFUL, IDPF_TX_DESC_LEN_L4_LEN_S) +#define IDPF_TXD_QW1_MACLEN_M GENMASK_ULL(6, 0) +#define IDPF_TXD_QW1_IPLEN_M GENMASK_ULL(13, 7) +#define IDPF_TXD_QW1_L4LEN_M GENMASK_ULL(17, 14) +#define IDPF_TXD_QW1_FCLEN_M GENMASK_ULL(17, 14) enum idpf_tx_base_desc_cmd_bits { IDPF_TX_DESC_CMD_EOP = 0x0001, @@ -240,29 +225,18 @@ enum idpf_tx_flex_desc_cmd_bits { struct idpf_flex_tx_desc { __le64 buf_addr; /* Packet buffer address */ struct { - __le16 cmd_dtype; -#define IDPF_FLEX_TXD_QW1_DTYPE_S 0 -#define IDPF_FLEX_TXD_QW1_DTYPE_M \ - IDPF_M(0x1FUL, IDPF_FLEX_TXD_QW1_DTYPE_S) +#define IDPF_FLEX_TXD_QW1_DTYPE_S 0 +#define IDPF_FLEX_TXD_QW1_DTYPE_M GENMASK(4, 0) #define IDPF_FLEX_TXD_QW1_CMD_S 5 -#define IDPF_FLEX_TXD_QW1_CMD_M IDPF_M(0x7FFUL, IDPF_TXD_QW1_CMD_S) +#define IDPF_FLEX_TXD_QW1_CMD_M GENMASK(15, 5) + __le16 cmd_dtype; union { - /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_DATA_(0x03) */ - u8 raw[4]; - - /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1 (0x06) */ - struct { - __le16 l2tag1; - u8 flex; - u8 tsync; - } tsync; - /* DTYPE=IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 (0x07) */ struct { __le16 l2tag1; __le16 l2tag2; } l2tags; - } flex; + }; __le16 buf_size; } qw1; }; @@ -312,16 +286,6 @@ struct idpf_flex_tx_tso_ctx_qw { }; union idpf_flex_tx_ctx_desc { - /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_CTX (0x04) */ - struct { - u8 qw0_flex[8]; - struct { - __le16 cmd_dtype; - __le16 l2tag1; - u8 qw1_flex[4]; - } qw1; - } gen; - /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */ struct { struct idpf_flex_tx_tso_ctx_qw qw0; @@ -330,98 +294,6 @@ union idpf_flex_tx_ctx_desc { u8 flex[6]; } qw1; } tso; - - /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX (0x08) */ - struct { - struct idpf_flex_tx_tso_ctx_qw qw0; - struct { - __le16 cmd_dtype; - __le16 l2tag2; - u8 flex0; - u8 ptag; - u8 flex1[2]; - } qw1; - } tso_l2tag2_ptag; - - /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX (0x0B) */ - struct { - u8 qw0_flex[8]; - struct { - __le16 cmd_dtype; - __le16 l2tag2; - u8 flex[4]; - } qw1; - } l2tag2; - - /* DTYPE = IDPF_TX_DESC_DTYPE_REINJECT_CTX (0x02) */ - struct { - struct { - __le32 sa_domain; -#define IDPF_TXD_FLEX_CTX_SA_DOM_M 0xFFFF -#define IDPF_TXD_FLEX_CTX_SA_DOM_VAL 0x10000 - __le32 sa_idx; -#define IDPF_TXD_FLEX_CTX_SAIDX_M 0x1FFFFF - } qw0; - struct { - __le16 cmd_dtype; - __le16 txr2comp; -#define IDPF_TXD_FLEX_CTX_TXR2COMP 0x1 - __le16 miss_txq_comp_tag; - __le16 miss_txq_id; - } qw1; - } reinjection_pkt; }; -/* Host Split Context Descriptors */ -struct idpf_flex_tx_hs_ctx_desc { - union { - struct { - __le32 host_fnum_tlen; -#define IDPF_TXD_FLEX_CTX_TLEN_S 0 -/* see IDPF_TXD_FLEX_CTX_TLEN_M for mask definition */ -#define IDPF_TXD_FLEX_CTX_FNUM_S 18 -#define IDPF_TXD_FLEX_CTX_FNUM_M 0x7FF -#define IDPF_TXD_FLEX_CTX_HOST_S 29 -#define IDPF_TXD_FLEX_CTX_HOST_M 0x7 - __le16 ftype_mss_rt; -#define IDPF_TXD_FLEX_CTX_MSS_RT_0 0 -#define IDPF_TXD_FLEX_CTX_MSS_RT_M 0x3FFF -#define IDPF_TXD_FLEX_CTX_FTYPE_S 14 -#define IDPF_TXD_FLEX_CTX_FTYPE_VF IDPF_M(0x0, 
IDPF_TXD_FLEX_CTX_FTYPE_S) -#define IDPF_TXD_FLEX_CTX_FTYPE_VDEV IDPF_M(0x1, IDPF_TXD_FLEX_CTX_FTYPE_S) -#define IDPF_TXD_FLEX_CTX_FTYPE_PF IDPF_M(0x2, IDPF_TXD_FLEX_CTX_FTYPE_S) - u8 hdr_len; - u8 ptag; - } tso; - struct { - u8 flex0[2]; - __le16 host_fnum_ftype; - u8 flex1[3]; - u8 ptag; - } no_tso; - } qw0; - - __le64 qw1_cmd_dtype; -#define IDPF_TXD_FLEX_CTX_QW1_PASID_S 16 -#define IDPF_TXD_FLEX_CTX_QW1_PASID_M 0xFFFFF -#define IDPF_TXD_FLEX_CTX_QW1_PASID_VALID_S 36 -#define IDPF_TXD_FLEX_CTX_QW1_PASID_VALID \ - IDPF_M(0x1, IDPF_TXD_FLEX_CTX_PASID_VALID_S) -#define IDPF_TXD_FLEX_CTX_QW1_TPH_S 37 -#define IDPF_TXD_FLEX_CTX_QW1_TPH \ - IDPF_M(0x1, IDPF_TXD_FLEX_CTX_TPH_S) -#define IDPF_TXD_FLEX_CTX_QW1_PFNUM_S 38 -#define IDPF_TXD_FLEX_CTX_QW1_PFNUM_M 0xF -/* The following are only valid for DTYPE = 0x09 and DTYPE = 0x0A */ -#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_S 42 -#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_M 0x1FFFFF -#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_VAL_S 63 -#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_VALID \ - IDPF_M(0x1, IDPF_TXD_FLEX_CTX_QW1_SAIDX_VAL_S) -/* The following are only valid for DTYPE = 0x0D and DTYPE = 0x0E */ -#define IDPF_TXD_FLEX_CTX_QW1_FLEX0_S 48 -#define IDPF_TXD_FLEX_CTX_QW1_FLEX0_M 0xFF -#define IDPF_TXD_FLEX_CTX_QW1_FLEX1_S 56 -#define IDPF_TXD_FLEX_CTX_QW1_FLEX1_M 0xFF -}; #endif /* _IDPF_LAN_TXRX_H_ */ diff --git a/drivers/common/idpf/base/idpf_lan_vf_regs.h b/drivers/common/idpf/base/idpf_lan_vf_regs.h index b5ff9b2cc93..eae28478ffc 100644 --- a/drivers/common/idpf/base/idpf_lan_vf_regs.h +++ b/drivers/common/idpf/base/idpf_lan_vf_regs.h @@ -9,7 +9,7 @@ /* Reset */ #define VFGEN_RSTAT 0x00008800 #define VFGEN_RSTAT_VFR_STATE_S 0 -#define VFGEN_RSTAT_VFR_STATE_M IDPF_M(0x3, VFGEN_RSTAT_VFR_STATE_S) +#define VFGEN_RSTAT_VFR_STATE_M GENMASK(1, 0) /* Control(VF Mailbox) Queue */ #define VF_BASE 0x00006000 @@ -18,7 +18,7 @@ #define VF_ATQBAH (VF_BASE + 0x1800) #define VF_ATQLEN (VF_BASE + 0x0800) #define VF_ATQLEN_ATQLEN_S 0 -#define VF_ATQLEN_ATQLEN_M IDPF_M(0x3FF, VF_ATQLEN_ATQLEN_S) +#define VF_ATQLEN_ATQLEN_M GENMASK(9, 0) #define VF_ATQLEN_ATQVFE_S 28 #define VF_ATQLEN_ATQVFE_M BIT(VF_ATQLEN_ATQVFE_S) #define VF_ATQLEN_ATQOVFL_S 29 @@ -29,14 +29,14 @@ #define VF_ATQLEN_ATQENABLE_M BIT(VF_ATQLEN_ATQENABLE_S) #define VF_ATQH (VF_BASE + 0x0400) #define VF_ATQH_ATQH_S 0 -#define VF_ATQH_ATQH_M IDPF_M(0x3FF, VF_ATQH_ATQH_S) +#define VF_ATQH_ATQH_M GENMASK(9, 0) #define VF_ATQT (VF_BASE + 0x2400) #define VF_ARQBAL (VF_BASE + 0x0C00) #define VF_ARQBAH (VF_BASE) #define VF_ARQLEN (VF_BASE + 0x2000) #define VF_ARQLEN_ARQLEN_S 0 -#define VF_ARQLEN_ARQLEN_M IDPF_M(0x3FF, VF_ARQLEN_ARQLEN_S) +#define VF_ARQLEN_ARQLEN_M GENMASK(9, 0) #define VF_ARQLEN_ARQVFE_S 28 #define VF_ARQLEN_ARQVFE_M BIT(VF_ARQLEN_ARQVFE_S) #define VF_ARQLEN_ARQOVFL_S 29 @@ -47,7 +47,7 @@ #define VF_ARQLEN_ARQENABLE_M BIT(VF_ARQLEN_ARQENABLE_S) #define VF_ARQH (VF_BASE + 0x1400) #define VF_ARQH_ARQH_S 0 -#define VF_ARQH_ARQH_M IDPF_M(0x1FFF, VF_ARQH_ARQH_S) +#define VF_ARQH_ARQH_M GENMASK(12, 0) #define VF_ARQT (VF_BASE + 0x1000) /* Transmit queues */ @@ -69,7 +69,7 @@ #define VF_INT_DYN_CTL0_INTENA_S 0 #define VF_INT_DYN_CTL0_INTENA_M BIT(VF_INT_DYN_CTL0_INTENA_S) #define VF_INT_DYN_CTL0_ITR_INDX_S 3 -#define VF_INT_DYN_CTL0_ITR_INDX_M IDPF_M(0x3, VF_INT_DYN_CTL0_ITR_INDX_S) +#define VF_INT_DYN_CTL0_ITR_INDX_M GENMASK(4, 3) #define VF_INT_DYN_CTLN(_INT) (0x00003800 + ((_INT) * 4)) #define VF_INT_DYN_CTLN_EXT(_INT) (0x00070000 + ((_INT) * 4)) #define VF_INT_DYN_CTLN_INTENA_S 0 @@ -79,7 +79,7 @@ #define 
VF_INT_DYN_CTLN_SWINT_TRIG_S 2 #define VF_INT_DYN_CTLN_SWINT_TRIG_M BIT(VF_INT_DYN_CTLN_SWINT_TRIG_S) #define VF_INT_DYN_CTLN_ITR_INDX_S 3 -#define VF_INT_DYN_CTLN_ITR_INDX_M IDPF_M(0x3, VF_INT_DYN_CTLN_ITR_INDX_S) +#define VF_INT_DYN_CTLN_ITR_INDX_M GENMASK(4, 3) #define VF_INT_DYN_CTLN_INTERVAL_S 5 #define VF_INT_DYN_CTLN_INTERVAL_M BIT(VF_INT_DYN_CTLN_INTERVAL_S) #define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S 24 @@ -94,17 +94,26 @@ * b/w itrn registers of the same vector */ #define VF_INT_ITR0(_ITR) (0x00004C00 + ((_ITR) * 4)) -#define VF_INT_ITRN_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \ - ((_reg_start) + (((_ITR)) * (_itrn_indx_spacing))) -/* For VF with 16 vector support, itrn_reg_spacing is 0x4 and itrn_indx_spacing is 0x40 */ -#define VF_INT_ITRN(_INT, _ITR) (0x00002800 + ((_INT) * 4) + ((_ITR) * 0x40)) -/* For VF with 64 vector support, itrn_reg_spacing is 0x4 and itrn_indx_spacing is 0x100 */ -#define VF_INT_ITRN_64(_INT, _ITR) (0x00002C00 + ((_INT) * 4) + ((_ITR) * 0x100)) -/* For VF with 2k vector support, itrn_reg_spacing is 0x4 and itrn_indx_spacing is 0x2000 */ -#define VF_INT_ITRN_2K(_INT, _ITR) (0x00072000 + ((_INT) * 4) + ((_ITR) * 0x2000)) +#define VF_INT_ITRN_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \ + ((_reg_start) + ((_ITR) * (_itrn_indx_spacing))) +/* For VF with 16 vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing + * is 0x40 and base register offset is 0x00002800 + */ +#define VF_INT_ITRN(_INT, _ITR) \ + (0x00002800 + ((_INT) * 4) + ((_ITR) * 0x40)) +/* For VF with 64 vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing + * is 0x100 and base register offset is 0x00002C00 + */ +#define VF_INT_ITRN_64(_INT, _ITR) \ + (0x00002C00 + ((_INT) * 4) + ((_ITR) * 0x100)) +/* For VF with 2k vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing + * is 0x2000 and base register offset is 0x00072000 + */ +#define VF_INT_ITRN_2K(_INT, _ITR) \ + (0x00072000 + ((_INT) * 4) + ((_ITR) * 0x2000)) #define VF_INT_ITRN_MAX_INDEX 2 #define VF_INT_ITRN_INTERVAL_S 0 -#define VF_INT_ITRN_INTERVAL_M IDPF_M(0xFFF, VF_INT_ITRN_INTERVAL_S) +#define VF_INT_ITRN_INTERVAL_M GENMASK(11, 0) #define VF_INT_PBA_CLEAR 0x00008900 #define VF_INT_ICR0_ENA1 0x00005000 diff --git a/drivers/common/idpf/base/idpf_osdep.h b/drivers/common/idpf/base/idpf_osdep.h index 2a817a98075..74a376cb13b 100644 --- a/drivers/common/idpf/base/idpf_osdep.h +++ b/drivers/common/idpf/base/idpf_osdep.h @@ -48,6 +48,13 @@ typedef struct idpf_lock idpf_lock; #define IDPF_M(m, s) ((m) << (s)) +#define BITS_PER_LONG (8 * sizeof(long)) +#define BITS_PER_LONG_LONG (8 * sizeof(long long)) +#define GENMASK(h, l) \ + (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + #ifndef ETH_ADDR_LEN #define ETH_ADDR_LEN 6 #endif diff --git a/drivers/common/idpf/base/idpf_prototype.h b/drivers/common/idpf/base/idpf_prototype.h index 988ff00506c..e2f090a9e35 100644 --- a/drivers/common/idpf/base/idpf_prototype.h +++ b/drivers/common/idpf/base/idpf_prototype.h @@ -20,7 +20,7 @@ #define APF int idpf_init_hw(struct idpf_hw *hw, struct idpf_ctlq_size ctlq_size); -int idpf_deinit_hw(struct idpf_hw *hw); +void idpf_deinit_hw(struct idpf_hw *hw); int idpf_clean_arq_element(struct idpf_hw *hw, struct idpf_arq_event_info *e, diff --git a/drivers/common/idpf/base/siov_regs.h b/drivers/common/idpf/base/siov_regs.h index fad329601ae..7e1ae2e300d 100644 --- a/drivers/common/idpf/base/siov_regs.h +++ 
b/drivers/common/idpf/base/siov_regs.h @@ -4,16 +4,6 @@ #ifndef _SIOV_REGS_H_ #define _SIOV_REGS_H_ #define VDEV_MBX_START 0x20000 /* Begin at 128KB */ -#define VDEV_MBX_ATQBAL (VDEV_MBX_START + 0x0000) -#define VDEV_MBX_ATQBAH (VDEV_MBX_START + 0x0004) -#define VDEV_MBX_ATQLEN (VDEV_MBX_START + 0x0008) -#define VDEV_MBX_ATQH (VDEV_MBX_START + 0x000C) -#define VDEV_MBX_ATQT (VDEV_MBX_START + 0x0010) -#define VDEV_MBX_ARQBAL (VDEV_MBX_START + 0x0014) -#define VDEV_MBX_ARQBAH (VDEV_MBX_START + 0x0018) -#define VDEV_MBX_ARQLEN (VDEV_MBX_START + 0x001C) -#define VDEV_MBX_ARQH (VDEV_MBX_START + 0x0020) -#define VDEV_MBX_ARQT (VDEV_MBX_START + 0x0024) #define VDEV_GET_RSTAT 0x21000 /* 132KB for RSTAT */ /* Begin at offset after 1MB (after 256 4k pages) */ @@ -43,5 +33,6 @@ #define VDEV_INT_ITR_1(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x08) #define VDEV_INT_ITR_2(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x0C) -/* Next offset to begin at 42MB (0x2A00000) */ +#define SIOV_REG_BAR_SIZE 0x2A00000 +/* Next offset to begin at 42MB + 4K (0x2A00000 + 0x1000) */ #endif /* _SIOV_REGS_H_ */ diff --git a/drivers/common/idpf/base/virtchnl2.h b/drivers/common/idpf/base/virtchnl2.h index 594bc26b8ca..3900b784d03 100644 --- a/drivers/common/idpf/base/virtchnl2.h +++ b/drivers/common/idpf/base/virtchnl2.h @@ -89,14 +89,18 @@ * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW */ /* opcodes 529, 530, and 531 are reserved */ -#define VIRTCHNL2_OP_CREATE_ADI 532 -#define VIRTCHNL2_OP_DESTROY_ADI 533 +#define VIRTCHNL2_OP_NON_FLEX_CREATE_ADI 532 +#define VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI 533 #define VIRTCHNL2_OP_LOOPBACK 534 #define VIRTCHNL2_OP_ADD_MAC_ADDR 535 #define VIRTCHNL2_OP_DEL_MAC_ADDR 536 #define VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE 537 #define VIRTCHNL2_OP_ADD_QUEUE_GROUPS 538 #define VIRTCHNL2_OP_DEL_QUEUE_GROUPS 539 +#define VIRTCHNL2_OP_GET_PORT_STATS 540 + /* TimeSync opcodes */ +#define VIRTCHNL2_OP_GET_PTP_CAPS 541 +#define VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES 542 #define VIRTCHNL2_RDMA_INVALID_QUEUE_IDX 0xFFFF @@ -230,6 +234,10 @@ #define VIRTCHNL2_CAP_RX_FLEX_DESC BIT(17) #define VIRTCHNL2_CAP_PTYPE BIT(18) #define VIRTCHNL2_CAP_LOOPBACK BIT(19) +/* Enable miss completion types plus ability to detect a miss completion if a + * reserved bit is set in a standared completion's tag. 
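The idpf_osdep.h hunk earlier in this series adds Linux-style GENMASK()/GENMASK_ULL() helpers, and the idpf_lan_txrx.h and idpf_lan_vf_regs.h hunks convert their field masks from the IDPF_M(mask, shift) form to them. As a quick sanity check, here is a small standalone sketch (not part of the patch: the macros are copied locally so it compiles on its own, an LP64 target where long is 64 bits is assumed, and the MACLEN/IPLEN shift positions are inferred from the new masks rather than quoted from the header) verifying that a few of the converted masks are bit-for-bit identical:

/* Standalone check that GENMASK()/GENMASK_ULL() reproduce the old IDPF_M() masks. */
#include <assert.h>
#include <stdio.h>

#define IDPF_M(m, s) ((m) << (s))
#define BITS_PER_LONG (8 * sizeof(long))
#define BITS_PER_LONG_LONG (8 * sizeof(long long))
#define GENMASK(h, l) \
	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

int main(void)
{
	/* Register field masks from idpf_lan_vf_regs.h (shift values shown in the hunks) */
	assert(GENMASK(1, 0)  == IDPF_M(0x3UL, 0));    /* VFGEN_RSTAT_VFR_STATE_M */
	assert(GENMASK(9, 0)  == IDPF_M(0x3FFUL, 0));  /* VF_ATQLEN_ATQLEN_M */
	assert(GENMASK(4, 3)  == IDPF_M(0x3UL, 3));    /* VF_INT_DYN_CTL0_ITR_INDX_M */
	assert(GENMASK(11, 0) == IDPF_M(0xFFFUL, 0));  /* VF_INT_ITRN_INTERVAL_M */

	/* Descriptor length masks from idpf_lan_txrx.h; MACLEN at bit 0 and IPLEN at
	 * bit 7 are inferred from GENMASK_ULL(6, 0)/GENMASK_ULL(13, 7), L4_LEN_S = 14
	 * is stated in the hunk. */
	assert(GENMASK_ULL(6, 0)   == IDPF_M(0x7FULL, 0));
	assert(GENMASK_ULL(13, 7)  == IDPF_M(0x7FULL, 7));
	assert(GENMASK_ULL(17, 14) == IDPF_M(0xFULL, 14));

	printf("GENMASK conversions match the former IDPF_M() masks\n");
	return 0;
}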
+ */ +#define VIRTCHNL2_CAP_MISS_COMPL_TAG BIT(20) /* this must be the last capability */ #define VIRTCHNL2_CAP_OEM BIT(63) @@ -286,6 +294,7 @@ /* These messages are only sent to PF from CP */ #define VIRTCHNL2_EVENT_START_RESET_ADI 2 #define VIRTCHNL2_EVENT_FINISH_RESET_ADI 3 +#define VIRTCHNL2_EVENT_ADI_ACTIVE 4 /* VIRTCHNL2_QUEUE_TYPE * Transmit and Receive queue types are valid in legacy as well as split queue @@ -539,7 +548,8 @@ struct virtchnl2_get_capabilities { u8 max_sg_bufs_per_tx_pkt; u8 reserved1; - __le16 pad1; + /* upper bound of number of ADIs supported */ + __le16 max_adis; /* version of Control Plane that is running */ __le16 oem_cp_ver_major; @@ -582,6 +592,9 @@ struct virtchnl2_queue_reg_chunks { VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_queue_reg_chunks); +/* VIRTCHNL2_VPORT_FLAGS */ +#define VIRTCHNL2_VPORT_UPLINK_PORT BIT(0) + #define VIRTCHNL2_ETH_LENGTH_OF_ADDRESS 6 /* VIRTCHNL2_OP_CREATE_VPORT @@ -620,7 +633,8 @@ struct virtchnl2_create_vport { __le16 max_mtu; __le32 vport_id; u8 default_mac_addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS]; - __le16 pad; + /* see VIRTCHNL2_VPORT_FLAGS definitions */ + __le16 vport_flags; /* see VIRTCHNL2_RX_DESC_IDS definitions */ __le64 rx_desc_ids; /* see VIRTCHNL2_TX_DESC_IDS definitions */ @@ -1047,14 +1061,34 @@ struct virtchnl2_sriov_vfs_info { VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info); -/* VIRTCHNL2_OP_CREATE_ADI +/* structure to specify single chunk of queue */ +/* 'chunks' is fixed size(not flexible) and will be deprecated at some point */ +struct virtchnl2_non_flex_queue_reg_chunks { + __le16 num_chunks; + u8 reserved[6]; + struct virtchnl2_queue_reg_chunk chunks[1]; +}; + +VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_non_flex_queue_reg_chunks); + +/* structure to specify single chunk of interrupt vector */ +/* 'vchunks' is fixed size(not flexible) and will be deprecated at some point */ +struct virtchnl2_non_flex_vector_chunks { + __le16 num_vchunks; + u8 reserved[14]; + struct virtchnl2_vector_chunk vchunks[1]; +}; + +VIRTCHNL2_CHECK_STRUCT_LEN(48, virtchnl2_non_flex_vector_chunks); + +/* VIRTCHNL2_OP_NON_FLEX_CREATE_ADI * PF sends this message to CP to create ADI by filling in required - * fields of virtchnl2_create_adi structure. - * CP responds with the updated virtchnl2_create_adi structure containing the - * necessary fields followed by chunks which in turn will have an array of + * fields of virtchnl2_non_flex_create_adi structure. + * CP responds with the updated virtchnl2_non_flex_create_adi structure containing + * the necessary fields followed by chunks which in turn will have an array of * num_chunks entries of virtchnl2_queue_chunk structures. 
*/ -struct virtchnl2_create_adi { +struct virtchnl2_non_flex_create_adi { /* PF sends PASID to CP */ __le32 pasid; /* @@ -1064,29 +1098,31 @@ struct virtchnl2_create_adi { __le16 mbx_id; /* PF sends mailbox vector id to CP */ __le16 mbx_vec_id; + /* PF populates this ADI index */ + __le16 adi_index; /* CP populates ADI id */ __le16 adi_id; u8 reserved[64]; - u8 pad[6]; + u8 pad[4]; /* CP populates queue chunks */ - struct virtchnl2_queue_reg_chunks chunks; + struct virtchnl2_non_flex_queue_reg_chunks chunks; /* PF sends vector chunks to CP */ - struct virtchnl2_vector_chunks vchunks; + struct virtchnl2_non_flex_vector_chunks vchunks; }; -VIRTCHNL2_CHECK_STRUCT_LEN(168, virtchnl2_create_adi); +VIRTCHNL2_CHECK_STRUCT_LEN(168, virtchnl2_non_flex_create_adi); /* VIRTCHNL2_OP_DESTROY_ADI * PF sends this message to CP to destroy ADI by filling * in the adi_id in virtchnl2_destropy_adi structure. * CP responds with the status of the requested operation. */ -struct virtchnl2_destroy_adi { +struct virtchnl2_non_flex_destroy_adi { __le16 adi_id; u8 reserved[2]; }; -VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_destroy_adi); +VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_non_flex_destroy_adi); /* Based on the descriptor type the PF supports, CP fills ptype_id_10 or * ptype_id_8 for flex and base descriptor respectively. If ptype_id_10 value @@ -1159,6 +1195,74 @@ struct virtchnl2_vport_stats { VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats); +/* physical port statistics */ +struct virtchnl2_phy_port_stats { + __le64 rx_bytes; + __le64 rx_unicast_pkts; + __le64 rx_multicast_pkts; + __le64 rx_broadcast_pkts; + __le64 rx_size_64_pkts; + __le64 rx_size_127_pkts; + __le64 rx_size_255_pkts; + __le64 rx_size_511_pkts; + __le64 rx_size_1023_pkts; + __le64 rx_size_1518_pkts; + __le64 rx_size_jumbo_pkts; + __le64 rx_xon_events; + __le64 rx_xoff_events; + __le64 rx_undersized_pkts; + __le64 rx_fragmented_pkts; + __le64 rx_oversized_pkts; + __le64 rx_jabber_pkts; + __le64 rx_csum_errors; + __le64 rx_length_errors; + __le64 rx_dropped_pkts; + __le64 rx_crc_errors; + /* Frames with length < 64 and a bad CRC */ + __le64 rx_runt_errors; + __le64 rx_illegal_bytes; + __le64 rx_total_pkts; + u8 rx_reserved[128]; + + __le64 tx_bytes; + __le64 tx_unicast_pkts; + __le64 tx_multicast_pkts; + __le64 tx_broadcast_pkts; + __le64 tx_errors; + __le64 tx_timeout_events; + __le64 tx_size_64_pkts; + __le64 tx_size_127_pkts; + __le64 tx_size_255_pkts; + __le64 tx_size_511_pkts; + __le64 tx_size_1023_pkts; + __le64 tx_size_1518_pkts; + __le64 tx_size_jumbo_pkts; + __le64 tx_xon_events; + __le64 tx_xoff_events; + __le64 tx_dropped_link_down_pkts; + __le64 tx_total_pkts; + u8 tx_reserved[128]; + __le64 mac_local_faults; + __le64 mac_remote_faults; +}; + +VIRTCHNL2_CHECK_STRUCT_LEN(600, virtchnl2_phy_port_stats); + +/* VIRTCHNL2_OP_GET_PORT_STATS + * PF/VF sends this message to CP to get the updated stats by specifying the + * vport_id. CP responds with stats in struct virtchnl2_port_stats that + * includes both physical port as well as vport statistics. + */ +struct virtchnl2_port_stats { + __le32 vport_id; + u8 pad[4]; + + struct virtchnl2_phy_port_stats phy_port_stats; + struct virtchnl2_vport_stats virt_port_stats; +}; + +VIRTCHNL2_CHECK_STRUCT_LEN(736, virtchnl2_port_stats); + /* VIRTCHNL2_OP_EVENT * CP sends this message to inform the PF/VF driver of events that may affect * it. 
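Each new wire structure above is pinned to an exact size with VIRTCHNL2_CHECK_STRUCT_LEN(); for virtchnl2_port_stats the expected 736 bytes is simply the sum of its members: the 4-byte vport_id, 4 bytes of padding, the new 600-byte virtchnl2_phy_port_stats and the existing 128-byte virtchnl2_vport_stats. A minimal sketch of how such a compile-time size check can be expressed; the real VIRTCHNL2_CHECK_STRUCT_LEN() macro is defined elsewhere in virtchnl2.h and is not quoted here, so this uses C11 _Static_assert and opaque byte arrays as stand-ins for the nested structures:

/* Illustrative only: compile-time size pinning in the style of VIRTCHNL2_CHECK_STRUCT_LEN(). */
#include <stdint.h>

#define CHECK_STRUCT_LEN(n, X) \
	_Static_assert((n) == sizeof(struct X), "struct " #X " must be " #n " bytes on the wire")

struct example_port_stats {
	uint32_t vport_id;            /* 4 bytes */
	uint8_t pad[4];               /* 4 bytes, keeps the 64-bit counters that follow aligned */
	uint8_t phy_port_stats[600];  /* stand-in for struct virtchnl2_phy_port_stats */
	uint8_t virt_port_stats[128]; /* stand-in for struct virtchnl2_vport_stats */
};

CHECK_STRUCT_LEN(736, example_port_stats);  /* 4 + 4 + 600 + 128 = 736 */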
No direct response is expected from the driver, though it may generate @@ -1318,6 +1422,112 @@ struct virtchnl2_promisc_info { VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info); +/* VIRTCHNL2_PTP_CAPS + * PTP capabilities + */ +#define VIRTCHNL2_PTP_CAP_LEGACY_CROSS_TIME BIT(0) +#define VIRTCHNL2_PTP_CAP_PTM BIT(1) +#define VIRTCHNL2_PTP_CAP_DEVICE_CLOCK_CONTROL BIT(2) +#define VIRTCHNL2_PTP_CAP_TX_TSTAMPS_DIRECT BIT(3) +#define VIRTCHNL2_PTP_CAP_TX_TSTAMPS_VIRTCHNL BIT(4) + +/* Legacy cross time registers offsets */ +struct virtchnl2_ptp_legacy_cross_time_reg { + __le32 shadow_time_0; + __le32 shadow_time_l; + __le32 shadow_time_h; + __le32 cmd_sync; +}; + +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_legacy_cross_time_reg); + +/* PTM cross time registers offsets */ +struct virtchnl2_ptp_ptm_cross_time_reg { + __le32 art_l; + __le32 art_h; + __le32 cmd_sync; + u8 pad[4]; +}; + +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_ptm_cross_time_reg); + +/* Registers needed to control the main clock */ +struct virtchnl2_ptp_device_clock_control { + __le32 cmd; + __le32 incval_l; + __le32 incval_h; + __le32 shadj_l; + __le32 shadj_h; + u8 pad[4]; +}; + +VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_device_clock_control); + +/* Structure that defines tx tstamp entry - index and register offset */ +struct virtchnl2_ptp_tx_tstamp_entry { + __le32 tx_latch_register_base; + __le32 tx_latch_register_offset; + u8 index; + u8 pad[7]; +}; + +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_entry); + +/* Structure that defines tx tstamp entries - total number of latches + * and the array of entries. + */ +struct virtchnl2_ptp_tx_tstamp { + __le16 num_latches; + /* latch size expressed in bits */ + __le16 latch_size; + u8 pad[4]; + struct virtchnl2_ptp_tx_tstamp_entry ptp_tx_tstamp_entries[1]; +}; + +VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_tx_tstamp); + +/* VIRTCHNL2_OP_GET_PTP_CAPS + * PV/VF sends this message to negotiate PTP capabilities. CP updates bitmap + * with supported features and fulfills appropriate structures. + */ +struct virtchnl2_get_ptp_caps { + /* PTP capability bitmap */ + /* see VIRTCHNL2_PTP_CAPS definitions */ + __le32 ptp_caps; + u8 pad[4]; + + struct virtchnl2_ptp_legacy_cross_time_reg legacy_cross_time_reg; + struct virtchnl2_ptp_ptm_cross_time_reg ptm_cross_time_reg; + struct virtchnl2_ptp_device_clock_control device_clock_control; + struct virtchnl2_ptp_tx_tstamp tx_tstamp; +}; + +VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_get_ptp_caps); + +/* Structure that describes tx tstamp values, index and validity */ +struct virtchnl2_ptp_tx_tstamp_latch { + __le32 tstamp_h; + __le32 tstamp_l; + u8 index; + u8 valid; + u8 pad[6]; +}; + +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch); + +/* VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES + * PF/VF sends this message to receive a specified number of timestamps + * entries. 
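The PTP structures above only transport register offsets and raw latch contents; how a driver folds a returned latch into a usable timestamp is left to the consumer. Below is a speculative sketch of one plausible reading, assuming tstamp_h/tstamp_l are the upper and lower 32 bits of the captured value and latch_size gives the valid width in bits; neither interpretation is stated in this hunk, so treat it purely as an illustration:

/* Sketch only: folding a virtchnl2_ptp_tx_tstamp_latch-style entry into one value. */
#include <stdint.h>

struct tx_tstamp_latch {      /* mirrors virtchnl2_ptp_tx_tstamp_latch */
	uint32_t tstamp_h;
	uint32_t tstamp_l;
	uint8_t index;
	uint8_t valid;
	uint8_t pad[6];
};

static inline int
read_tx_tstamp_latch(const struct tx_tstamp_latch *latch, uint16_t latch_size_bits,
		     uint64_t *ts)
{
	uint64_t raw;

	if (!latch->valid)
		return -1;  /* nothing captured in this latch */

	/* Assumed: high word in tstamp_h, low word in tstamp_l. */
	raw = ((uint64_t)latch->tstamp_h << 32) | latch->tstamp_l;
	if (latch_size_bits < 64)
		raw &= (UINT64_C(1) << latch_size_bits) - 1;  /* keep only latch_size bits */

	*ts = raw;
	return 0;
}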
+ */ +struct virtchnl2_ptp_tx_tstamp_latches { + __le16 num_latches; + /* latch size expressed in bits */ + __le16 latch_size; + u8 pad[4]; + struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[1]; +}; + +VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_tx_tstamp_latches); static inline const char *virtchnl2_op_str(__le32 v_opcode) { @@ -1376,14 +1586,20 @@ static inline const char *virtchnl2_op_str(__le32 v_opcode) return "VIRTCHNL2_OP_EVENT"; case VIRTCHNL2_OP_RESET_VF: return "VIRTCHNL2_OP_RESET_VF"; - case VIRTCHNL2_OP_CREATE_ADI: - return "VIRTCHNL2_OP_CREATE_ADI"; - case VIRTCHNL2_OP_DESTROY_ADI: - return "VIRTCHNL2_OP_DESTROY_ADI"; + case VIRTCHNL2_OP_NON_FLEX_CREATE_ADI: + return "VIRTCHNL2_OP_NON_FLEX_CREATE_ADI"; + case VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI: + return "VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI"; case VIRTCHNL2_OP_ADD_QUEUE_GROUPS: return "VIRTCHNL2_OP_ADD_QUEUE_GROUPS"; case VIRTCHNL2_OP_DEL_QUEUE_GROUPS: return "VIRTCHNL2_OP_DEL_QUEUE_GROUPS"; + case VIRTCHNL2_OP_GET_PORT_STATS: + return "VIRTCHNL2_OP_GET_PORT_STATS"; + case VIRTCHNL2_OP_GET_PTP_CAPS: + return "VIRTCHNL2_OP_GET_PTP_CAPS"; + case VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES: + return "VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES"; default: return "Unsupported (update virtchnl2.h)"; } @@ -1428,11 +1644,11 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3 sizeof(struct virtchnl2_queue_reg_chunk); } break; - case VIRTCHNL2_OP_CREATE_ADI: - valid_len = sizeof(struct virtchnl2_create_adi); + case VIRTCHNL2_OP_NON_FLEX_CREATE_ADI: + valid_len = sizeof(struct virtchnl2_non_flex_create_adi); if (msglen >= valid_len) { - struct virtchnl2_create_adi *cadi = - (struct virtchnl2_create_adi *)msg; + struct virtchnl2_non_flex_create_adi *cadi = + (struct virtchnl2_non_flex_create_adi *)msg; if (cadi->chunks.num_chunks == 0) { /* zero chunks is allowed as input */ @@ -1449,8 +1665,8 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3 sizeof(struct virtchnl2_vector_chunk); } break; - case VIRTCHNL2_OP_DESTROY_ADI: - valid_len = sizeof(struct virtchnl2_destroy_adi); + case VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI: + valid_len = sizeof(struct virtchnl2_non_flex_destroy_adi); break; case VIRTCHNL2_OP_DESTROY_VPORT: case VIRTCHNL2_OP_ENABLE_VPORT: @@ -1648,8 +1864,43 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3 case VIRTCHNL2_OP_GET_STATS: valid_len = sizeof(struct virtchnl2_vport_stats); break; + case VIRTCHNL2_OP_GET_PORT_STATS: + valid_len = sizeof(struct virtchnl2_port_stats); + break; case VIRTCHNL2_OP_RESET_VF: break; + case VIRTCHNL2_OP_GET_PTP_CAPS: + valid_len = sizeof(struct virtchnl2_get_ptp_caps); + + if (msglen >= valid_len) { + struct virtchnl2_get_ptp_caps *ptp_caps = + (struct virtchnl2_get_ptp_caps *)msg; + + if (ptp_caps->tx_tstamp.num_latches == 0) { + err_msg_format = true; + break; + } + + valid_len += ((ptp_caps->tx_tstamp.num_latches - 1) * + sizeof(struct virtchnl2_ptp_tx_tstamp_entry)); + } + break; + case VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES: + valid_len = sizeof(struct virtchnl2_ptp_tx_tstamp_latches); + + if (msglen >= valid_len) { + struct virtchnl2_ptp_tx_tstamp_latches *tx_tstamp_latches = + (struct virtchnl2_ptp_tx_tstamp_latches *)msg; + + if (tx_tstamp_latches->num_latches == 0) { + err_msg_format = true; + break; + } + + valid_len += ((tx_tstamp_latches->num_latches - 1) * + sizeof(struct virtchnl2_ptp_tx_tstamp_latch)); + } + break; /* These are always errors coming from the VF. 
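The two new validation cases above size their messages from structures that embed a single trailing array element (ptp_tx_tstamp_entries[1], tstamp_latches[1]), so only num_latches - 1 extra entries are added on top of sizeof(). A minimal sketch of that sizing rule, using the sizes pinned earlier in this header (24-byte virtchnl2_ptp_tx_tstamp_latches, 16-byte latch); the helper name is purely illustrative:

/* Sketch, not part of the patch: expected length of a message ending in a one-element array. */
#include <stddef.h>
#include <stdint.h>

static size_t
msg_expected_len(size_t fixed_len, size_t entry_len, uint16_t n_entries)
{
	if (n_entries == 0)
		return 0;  /* caller treats this as a malformed message */

	/* The first entry is already counted by sizeof() on the fixed part. */
	return fixed_len + (size_t)(n_entries - 1) * entry_len;
}

/*
 * Example: a virtchnl2_ptp_tx_tstamp_latches message (24 bytes including one
 * embedded 16-byte latch) carrying 4 latches is expected to be
 * 24 + 3 * 16 = 72 bytes, matching the valid_len computation above.
 */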
*/ case VIRTCHNL2_OP_EVENT: case VIRTCHNL2_OP_UNKNOWN: diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c index fc87e3e2434..83b131ef280 100644 --- a/drivers/common/idpf/idpf_common_rxtx.c +++ b/drivers/common/idpf/idpf_common_rxtx.c @@ -276,14 +276,14 @@ idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq) } txe = txq->sw_ring; - size = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc; + size = sizeof(struct idpf_base_tx_desc) * txq->nb_tx_desc; for (i = 0; i < size; i++) ((volatile char *)txq->tx_ring)[i] = 0; prev = (uint16_t)(txq->nb_tx_desc - 1); for (i = 0; i < txq->nb_tx_desc; i++) { - txq->tx_ring[i].qw1.cmd_dtype = - rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE); + txq->tx_ring[i].qw1 = + rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE); txe[i].mbuf = NULL; txe[i].last_id = i; txe[prev].next_id = i; @@ -871,6 +871,7 @@ idpf_dp_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_to_clean; uint16_t nb_tx = 0; uint64_t ol_flags; + uint8_t cmd_dtype; uint16_t nb_ctx; if (unlikely(txq == NULL) || unlikely(!txq->q_started)) @@ -902,6 +903,7 @@ idpf_dp_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, if (txq->nb_free < tx_pkt->nb_segs) break; + cmd_dtype = 0; ol_flags = tx_pkt->ol_flags; tx_offload.l2_len = tx_pkt->l2_len; tx_offload.l3_len = tx_pkt->l3_len; @@ -911,6 +913,9 @@ idpf_dp_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, nb_ctx = idpf_calc_context_desc(ol_flags); nb_used = tx_pkt->nb_segs + nb_ctx; + if (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK) + cmd_dtype = IDPF_TXD_FLEX_FLOW_CMD_CS_EN; + /* context descriptor */ if (nb_ctx != 0) { volatile union idpf_flex_tx_ctx_desc *ctx_desc = @@ -933,8 +938,8 @@ idpf_dp_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* Setup TX descriptor */ txd->buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt)); - txd->qw1.cmd_dtype = - rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE); + cmd_dtype |= IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE; + txd->qw1.cmd_dtype = cmd_dtype; txd->qw1.rxr_bufsize = tx_pkt->data_len; txd->qw1.compl_tag = sw_id; tx_id++; @@ -948,8 +953,6 @@ idpf_dp_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* fill the last descriptor with End of Packet (EOP) bit */ txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_EOP; - if (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK) - txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN; txq->nb_free = (uint16_t)(txq->nb_free - nb_used); txq->nb_used = (uint16_t)(txq->nb_used + nb_used); @@ -1307,17 +1310,16 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq) uint16_t nb_tx_to_clean; uint16_t i; - volatile struct idpf_flex_tx_desc *txd = txq->tx_ring; + volatile struct idpf_base_tx_desc *txd = txq->tx_ring; desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh); if (desc_to_clean_to >= nb_tx_desc) desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; - /* In the writeback Tx desccriptor, the only significant fields are the 4-bit DTYPE */ - if ((txd[desc_to_clean_to].qw1.cmd_dtype & - rte_cpu_to_le_16(IDPF_TXD_QW1_DTYPE_M)) != - rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE)) { + if ((txd[desc_to_clean_to].qw1 & + rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) != + rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE)) { TX_LOG(DEBUG, "TX descriptor %4u is not done " "(port=%d queue=%d)", desc_to_clean_to, txq->port_id, txq->queue_id); @@ -1331,10 +1333,7 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq) nb_tx_to_clean = 
(uint16_t)(desc_to_clean_to - last_desc_cleaned); - txd[desc_to_clean_to].qw1.cmd_dtype = 0; - txd[desc_to_clean_to].qw1.buf_size = 0; - for (i = 0; i < RTE_DIM(txd[desc_to_clean_to].qw1.flex.raw); i++) - txd[desc_to_clean_to].qw1.flex.raw[i] = 0; + txd[desc_to_clean_to].qw1 = 0; txq->last_desc_cleaned = desc_to_clean_to; txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean); @@ -1347,8 +1346,8 @@ uint16_t idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - volatile struct idpf_flex_tx_desc *txd; - volatile struct idpf_flex_tx_desc *txr; + volatile struct idpf_base_tx_desc *txd; + volatile struct idpf_base_tx_desc *txr; union idpf_tx_offload tx_offload = {0}; struct idpf_tx_entry *txe, *txn; struct idpf_tx_entry *sw_ring; @@ -1356,6 +1355,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, struct rte_mbuf *tx_pkt; struct rte_mbuf *m_seg; uint64_t buf_dma_addr; + uint32_t td_offset; uint64_t ol_flags; uint16_t tx_last; uint16_t nb_used; @@ -1382,6 +1382,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { td_cmd = 0; + td_offset = 0; tx_pkt = *tx_pkts++; RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); @@ -1426,6 +1427,9 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } } + if (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK) + td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN; + if (nb_ctx != 0) { /* Setup TX context descriptor if required */ volatile union idpf_flex_tx_ctx_desc *ctx_txd = @@ -1462,9 +1466,10 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, slen = m_seg->data_len; buf_dma_addr = rte_mbuf_data_iova(m_seg); txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr); - txd->qw1.buf_size = slen; - txd->qw1.cmd_dtype = rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_FLEX_DATA << - IDPF_FLEX_TXD_QW1_DTYPE_S); + txd->qw1 = rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DATA | + ((uint64_t)td_cmd << IDPF_TXD_QW1_CMD_S) | + ((uint64_t)td_offset << IDPF_TXD_QW1_OFFSET_S) | + ((uint64_t)slen << IDPF_TXD_QW1_TX_BUF_SZ_S)); txe->last_id = tx_last; tx_id = txe->next_id; @@ -1473,7 +1478,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } while (m_seg); /* The last packet data descriptor needs End Of Packet (EOP) */ - td_cmd |= IDPF_TX_FLEX_DESC_CMD_EOP; + td_cmd |= IDPF_TX_DESC_CMD_EOP; txq->nb_used = (uint16_t)(txq->nb_used + nb_used); txq->nb_free = (uint16_t)(txq->nb_free - nb_used); @@ -1482,16 +1487,13 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, "%4u (port=%d queue=%d)", tx_last, txq->port_id, txq->queue_id); - td_cmd |= IDPF_TX_FLEX_DESC_CMD_RS; + td_cmd |= IDPF_TX_DESC_CMD_RS; /* Update txq RS bit counters */ txq->nb_used = 0; } - if (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK) - td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN; - - txd->qw1.cmd_dtype |= rte_cpu_to_le_16(td_cmd << IDPF_FLEX_TXD_QW1_CMD_S); + txd->qw1 |= rte_cpu_to_le_16(td_cmd << IDPF_TXD_QW1_CMD_S); } end_of_tx: diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h index 6cb83fc0a66..b49b1ed737f 100644 --- a/drivers/common/idpf/idpf_common_rxtx.h +++ b/drivers/common/idpf/idpf_common_rxtx.h @@ -157,7 +157,7 @@ struct idpf_tx_entry { /* Structure associated with each TX queue. 
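The single-queue TX path above now writes the whole descriptor quad-word in one 64-bit store: dtype, command bits, offset and buffer size are OR-ed into qw1, and the cleanup path only compares the dtype field against DESC_DONE. A rough sketch of that layout follows; the shift values are placeholders, since the authoritative IDPF_TXD_QW1_* positions live in idpf_lan_txrx.h and are not quoted in this hunk (only the 4-bit dtype field and DESC_DONE = 15 are):

/* Sketch of the base TX descriptor qw1 composition; shift values are illustrative only. */
#include <stdint.h>

#define EX_QW1_CMD_S        4    /* placeholder for IDPF_TXD_QW1_CMD_S */
#define EX_QW1_OFFSET_S     16   /* placeholder for IDPF_TXD_QW1_OFFSET_S */
#define EX_QW1_TX_BUF_SZ_S  34   /* placeholder for IDPF_TXD_QW1_TX_BUF_SZ_S */
#define EX_QW1_DTYPE_M      0xFULL   /* writeback dtype is a 4-bit field */
#define EX_DTYPE_DATA       0x0ULL   /* placeholder for IDPF_TX_DESC_DTYPE_DATA */
#define EX_DTYPE_DESC_DONE  0xFULL   /* 15, as in IDPF_TX_DESC_DTYPE_DESC_DONE */

static inline uint64_t
build_qw1(uint64_t td_cmd, uint64_t td_offset, uint64_t buf_len)
{
	/* Mirrors the OR-composition done in idpf_dp_singleq_xmit_pkts(). */
	return EX_DTYPE_DATA |
	       (td_cmd << EX_QW1_CMD_S) |
	       (td_offset << EX_QW1_OFFSET_S) |
	       (buf_len << EX_QW1_TX_BUF_SZ_S);
}

static inline int
desc_done(uint64_t qw1)
{
	/* Cleanup-side test: descriptor reusable only once HW wrote back DESC_DONE. */
	return (qw1 & EX_QW1_DTYPE_M) == EX_DTYPE_DESC_DONE;
}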
*/ struct idpf_tx_queue { const struct rte_memzone *mz; /* memzone for Tx ring */ - volatile struct idpf_flex_tx_desc *tx_ring; /* Tx ring virtual address */ + volatile struct idpf_base_tx_desc *tx_ring; /* Tx ring virtual address */ volatile union { struct idpf_flex_tx_sched_desc *desc_ring; struct idpf_splitq_tx_compl_desc *compl_ring; diff --git a/drivers/common/idpf/idpf_common_rxtx_avx512.c b/drivers/common/idpf/idpf_common_rxtx_avx512.c index 81312617cc6..f65e8d512b8 100644 --- a/drivers/common/idpf/idpf_common_rxtx_avx512.c +++ b/drivers/common/idpf/idpf_common_rxtx_avx512.c @@ -505,11 +505,11 @@ _idpf_singleq_recv_raw_pkts_avx512(struct idpf_rx_queue *rxq, status0_7 = _mm256_packs_epi32(status0_7, _mm256_setzero_si256()); - uint64_t burst = __builtin_popcountll + uint64_t burst = rte_popcount64 (_mm_cvtsi128_si64 (_mm256_extracti128_si256 (status0_7, 1))); - burst += __builtin_popcountll + burst += rte_popcount64 (_mm_cvtsi128_si64 (_mm256_castsi256_si128(status0_7))); received += burst; @@ -966,7 +966,7 @@ _idpf_splitq_recv_raw_pkts_avx512(struct idpf_rx_queue *rxq, _mm512_and_epi64(raw_gen0_7, gen_check), _mm512_set1_epi64((uint64_t)rxq->expected_gen_id << 46)); const __mmask8 recv_mask = _kand_mask8(dd_mask, gen_mask); - uint16_t burst = __builtin_popcount(_cvtmask8_u32(recv_mask)); + uint16_t burst = rte_popcount32(_cvtmask8_u32(recv_mask)); received += burst; if (burst != IDPF_DESCS_PER_LOOP_AVX) @@ -1005,7 +1005,7 @@ idpf_tx_singleq_free_bufs_avx512(struct idpf_tx_queue *txq) struct rte_mbuf *m, *free[txq->rs_thresh]; /* check DD bits on threshold descriptor */ - if ((txq->tx_ring[txq->next_dd].qw1.cmd_dtype & + if ((txq->tx_ring[txq->next_dd].qw1 & rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) != rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE)) return 0; @@ -1113,15 +1113,14 @@ tx_backlog_entry_avx512(struct idpf_tx_vec_entry *txep, txep[i].mbuf = tx_pkts[i]; } -#define IDPF_FLEX_TXD_QW1_BUF_SZ_S 48 static __rte_always_inline void -idpf_singleq_vtx1(volatile struct idpf_flex_tx_desc *txdp, +idpf_singleq_vtx1(volatile struct idpf_base_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags) { uint64_t high_qw = - (IDPF_TX_DESC_DTYPE_FLEX_DATA << IDPF_FLEX_TXD_QW1_DTYPE_S | - ((uint64_t)flags << IDPF_FLEX_TXD_QW1_CMD_S) | - ((uint64_t)pkt->data_len << IDPF_FLEX_TXD_QW1_BUF_SZ_S)); + (IDPF_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << IDPF_TXD_QW1_CMD_S) | + ((uint64_t)pkt->data_len << IDPF_TXD_QW1_TX_BUF_SZ_S)); __m128i descriptor = _mm_set_epi64x(high_qw, pkt->buf_iova + pkt->data_off); @@ -1131,11 +1130,11 @@ idpf_singleq_vtx1(volatile struct idpf_flex_tx_desc *txdp, #define IDPF_TX_LEN_MASK 0xAA #define IDPF_TX_OFF_MASK 0x55 static __rte_always_inline void -idpf_singleq_vtx(volatile struct idpf_flex_tx_desc *txdp, +idpf_singleq_vtx(volatile struct idpf_base_tx_desc *txdp, struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) { - const uint64_t hi_qw_tmpl = (IDPF_TX_DESC_DTYPE_FLEX_DATA | - ((uint64_t)flags << IDPF_FLEX_TXD_QW1_CMD_S)); + const uint64_t hi_qw_tmpl = (IDPF_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << IDPF_TXD_QW1_CMD_S)); /* if unaligned on 32-bit boundary, do one to align */ if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) { @@ -1148,19 +1147,19 @@ idpf_singleq_vtx(volatile struct idpf_flex_tx_desc *txdp, uint64_t hi_qw3 = hi_qw_tmpl | ((uint64_t)pkt[3]->data_len << - IDPF_FLEX_TXD_QW1_BUF_SZ_S); + IDPF_TXD_QW1_TX_BUF_SZ_S); uint64_t hi_qw2 = hi_qw_tmpl | ((uint64_t)pkt[2]->data_len << - IDPF_FLEX_TXD_QW1_BUF_SZ_S); + IDPF_TXD_QW1_TX_BUF_SZ_S); uint64_t hi_qw1 = 
hi_qw_tmpl | ((uint64_t)pkt[1]->data_len << - IDPF_FLEX_TXD_QW1_BUF_SZ_S); + IDPF_TXD_QW1_TX_BUF_SZ_S); uint64_t hi_qw0 = hi_qw_tmpl | ((uint64_t)pkt[0]->data_len << - IDPF_FLEX_TXD_QW1_BUF_SZ_S); + IDPF_TXD_QW1_TX_BUF_SZ_S); __m512i desc0_3 = _mm512_set_epi64 @@ -1187,11 +1186,11 @@ idpf_singleq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pk uint16_t nb_pkts) { struct idpf_tx_queue *txq = tx_queue; - volatile struct idpf_flex_tx_desc *txdp; + volatile struct idpf_base_tx_desc *txdp; struct idpf_tx_vec_entry *txep; uint16_t n, nb_commit, tx_id; - uint64_t flags = IDPF_TX_FLEX_DESC_CMD_EOP; - uint64_t rs = IDPF_TX_FLEX_DESC_CMD_RS | flags; + uint64_t flags = IDPF_TX_DESC_CMD_EOP; + uint64_t rs = IDPF_TX_DESC_CMD_RS | flags; /* cross rx_thresh boundary is not allowed */ nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh); @@ -1238,9 +1237,9 @@ idpf_singleq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pk tx_id = (uint16_t)(tx_id + nb_commit); if (tx_id > txq->next_rs) { - txq->tx_ring[txq->next_rs].qw1.cmd_dtype |= - rte_cpu_to_le_64(((uint64_t)IDPF_TX_FLEX_DESC_CMD_RS) << - IDPF_FLEX_TXD_QW1_CMD_S); + txq->tx_ring[txq->next_rs].qw1 |= + rte_cpu_to_le_64(((uint64_t)IDPF_TX_DESC_CMD_RS) << + IDPF_TXD_QW1_CMD_S); txq->next_rs = (uint16_t)(txq->next_rs + txq->rs_thresh); } diff --git a/drivers/common/mlx5/linux/mlx5_common_os.c b/drivers/common/mlx5/linux/mlx5_common_os.c index 2ebb8ac8b6e..7260c1a19fd 100644 --- a/drivers/common/mlx5/linux/mlx5_common_os.c +++ b/drivers/common/mlx5/linux/mlx5_common_os.c @@ -266,7 +266,7 @@ mlx5_glue_path(char *buf, size_t size) goto error; return buf; error: - RTE_LOG(ERR, PMD, "unable to append \"-glue\" to last component of" + DRV_LOG(ERR, "unable to append \"-glue\" to last component of" " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"), please" " re-configure DPDK"); return NULL; diff --git a/drivers/common/mlx5/mlx5_common.c b/drivers/common/mlx5/mlx5_common.c index 0ad14a48c7a..ca8543e36ec 100644 --- a/drivers/common/mlx5/mlx5_common.c +++ b/drivers/common/mlx5/mlx5_common.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c index 66a77159a03..ff2d6d10b70 100644 --- a/drivers/common/mlx5/mlx5_devx_cmds.c +++ b/drivers/common/mlx5/mlx5_devx_cmds.c @@ -543,7 +543,7 @@ mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx, MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION | MLX5_HCA_CAP_OPMOD_GET_CUR); if (!hcattr) { - RTE_LOG(DEBUG, PMD, "Failed to query devx VDPA capabilities"); + DRV_LOG(DEBUG, "Failed to query devx VDPA capabilities"); vdpa_attr->valid = 0; } else { vdpa_attr->valid = 1; @@ -1313,6 +1313,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, attr->tunnel_stateless_gtp = MLX5_GET (per_protocol_networking_offload_caps, hcattr, tunnel_stateless_gtp); + attr->tunnel_stateless_vxlan_gpe_nsh = MLX5_GET + (per_protocol_networking_offload_caps, + hcattr, tunnel_stateless_vxlan_gpe_nsh); attr->rss_ind_tbl_cap = MLX5_GET (per_protocol_networking_offload_caps, hcattr, rss_ind_tbl_cap); diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h index e071cd841f0..11772431aea 100644 --- a/drivers/common/mlx5/mlx5_devx_cmds.h +++ b/drivers/common/mlx5/mlx5_devx_cmds.h @@ -196,6 +196,7 @@ struct mlx5_hca_attr { uint32_t tunnel_stateless_geneve_rx:1; uint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW */ uint32_t tunnel_stateless_gtp:1; + uint32_t tunnel_stateless_vxlan_gpe_nsh:1; uint32_t max_lso_cap; uint32_t 
scatter_fcs:1; uint32_t lro_cap:1; diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h index 51f426c6140..6e181a0ecad 100644 --- a/drivers/common/mlx5/mlx5_prm.h +++ b/drivers/common/mlx5/mlx5_prm.h @@ -761,6 +761,7 @@ enum { MLX5_MODIFICATION_TYPE_REMOVE = 0x5, MLX5_MODIFICATION_TYPE_NOP = 0x6, MLX5_MODIFICATION_TYPE_REMOVE_WORDS = 0x7, + MLX5_MODIFICATION_TYPE_ADD_FIELD = 0x8, MLX5_MODIFICATION_TYPE_MAX, }; @@ -857,6 +858,10 @@ enum modify_reg { REG_C_5, REG_C_6, REG_C_7, + REG_C_8, + REG_C_9, + REG_C_10, + REG_C_11, }; /* Modification sub command. */ @@ -1964,7 +1969,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 swp_lso[0x1]; u8 reserved_at_23[0x8]; u8 tunnel_stateless_gtp[0x1]; - u8 reserved_at_25[0x4]; + u8 reserved_at_25[0x2]; + u8 tunnel_stateless_vxlan_gpe_nsh[0x1]; + u8 reserved_at_28[0x1]; u8 max_vxlan_udp_ports[0x8]; u8 reserved_at_38[0x6]; u8 max_geneve_opt_len[0x1]; @@ -2080,7 +2087,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reparse[0x1]; u8 reserved_at_6b[0x1]; u8 cross_vhca_object[0x1]; - u8 reserved_at_6d[0xb]; + u8 reformat_l2_to_l3_audp_tunnel[0x1]; + u8 reformat_l3_audp_tunnel_to_l2[0x1]; + u8 ignore_flow_level_rtc_valid[0x1]; + u8 reserved_at_70[0x8]; u8 log_max_ft_num[0x8]; u8 reserved_at_80[0x10]; u8 log_max_flow_counter[0x8]; @@ -3438,6 +3448,7 @@ enum mlx5_ifc_stc_action_type { MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST = 0x0e, MLX5_IFC_STC_ACTION_TYPE_ASO = 0x12, MLX5_IFC_STC_ACTION_TYPE_COUNTER = 0x14, + MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD = 0x1b, MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE = 0x80, MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR = 0x81, MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT = 0x82, diff --git a/drivers/common/mlx5/windows/mlx5_win_ext.h b/drivers/common/mlx5/windows/mlx5_win_ext.h index 6f69329a6f1..a71c69bff5f 100644 --- a/drivers/common/mlx5/windows/mlx5_win_ext.h +++ b/drivers/common/mlx5/windows/mlx5_win_ext.h @@ -5,6 +5,8 @@ #ifndef __MLX5_WIN_ETX_H__ #define __MLX5_WIN_ETX_H__ +#include + #include "mlx5_prm.h" #include "mlx5devx.h" diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c index 2675f0d9d13..cbf1e6a988a 100644 --- a/drivers/common/qat/qat_device.c +++ b/drivers/common/qat/qat_device.c @@ -50,6 +50,9 @@ static const struct rte_pci_id pci_id_qat_map[] = { { RTE_PCI_DEVICE(0x8086, 0x4943), }, + { + RTE_PCI_DEVICE(0x8086, 0x4945), + }, {.device_id = 0}, }; @@ -202,6 +205,7 @@ qat_pci_device_allocate(struct rte_pci_device *pci_dev, break; case 0x4941: case 0x4943: + case 0x4945: qat_dev_gen = QAT_GEN4; break; default: diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c index f284718441b..f95dd33375a 100644 --- a/drivers/common/qat/qat_qp.c +++ b/drivers/common/qat/qat_qp.c @@ -286,7 +286,7 @@ qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue, queue->msg_size = desc_size; /* For fast calculation of cookie index, relies on msg_size being 2^n */ - queue->trailz = __builtin_ctz(desc_size); + queue->trailz = rte_ctz32(desc_size); /* * Write an unused pattern to the queue memory. 
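Several drivers in this series swap raw GCC builtins (__builtin_ctz, __builtin_ctzll, __builtin_popcountll) for the EAL wrappers rte_ctz32, rte_ctz64 and rte_popcount64. In the QAT queue code the result is cached as queue->trailz so that, with a power-of-two message size, a byte offset can be turned into a cookie index by a shift instead of a division. A small standalone sketch of that identity, using the compiler builtin directly as a stand-in for rte_ctz32:

/* Sketch: why trailz = ctz(desc_size) lets offset/desc_size become offset >> trailz. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t desc_size = 128;  /* must be a power of two, as the QAT comment requires */
	uint32_t trailz = (uint32_t)__builtin_ctz(desc_size);  /* stand-in for rte_ctz32() */
	uint32_t byte_offset = 7 * desc_size;

	/* Shift and divide agree whenever desc_size == 1 << trailz. */
	assert(desc_size == (1u << trailz));
	assert((byte_offset >> trailz) == byte_offset / desc_size);
	return 0;
}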
diff --git a/drivers/common/sfc_efx/base/efx.h b/drivers/common/sfc_efx/base/efx.h index efefea717f2..3312c2fa8f8 100644 --- a/drivers/common/sfc_efx/base/efx.h +++ b/drivers/common/sfc_efx/base/efx.h @@ -4811,6 +4811,15 @@ efx_mae_encap_header_alloc( __in size_t header_size, __out efx_mae_eh_id_t *eh_idp); +LIBEFX_API +extern __checkReturn efx_rc_t +efx_mae_encap_header_update( + __in efx_nic_t *enp, + __in efx_mae_eh_id_t *eh_idp, + __in efx_tunnel_protocol_t encap_type, + __in_bcount(header_size) const uint8_t *header_data, + __in size_t header_size); + LIBEFX_API extern __checkReturn efx_rc_t efx_mae_encap_header_free( @@ -5318,6 +5327,38 @@ efx_table_entry_delete( __in_bcount(data_size) uint8_t *entry_datap, __in unsigned int data_size); +/* + * Clone the given MAE action set specification + * and drop actions COUNT and DELIVER from it. + */ +LIBEFX_API +extern __checkReturn efx_rc_t +efx_mae_action_set_replay( + __in efx_nic_t *enp, + __in const efx_mae_actions_t *spec_orig, + __out efx_mae_actions_t **spec_clonep); + +/* + * The actual limit may be lower than this. + * This define merely limits the number of + * entries in a single allocation request. + */ +#define EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES 254 + +LIBEFX_API +extern __checkReturn efx_rc_t +efx_mae_action_set_list_alloc( + __in efx_nic_t *enp, + __in unsigned int n_asets, + __in_ecount(n_asets) const efx_mae_aset_id_t *aset_ids, + __out efx_mae_aset_list_id_t *aset_list_idp); + +LIBEFX_API +extern __checkReturn efx_rc_t +efx_mae_action_set_list_free( + __in efx_nic_t *enp, + __in const efx_mae_aset_list_id_t *aset_list_idp); + #ifdef __cplusplus } #endif diff --git a/drivers/common/sfc_efx/base/efx_mae.c b/drivers/common/sfc_efx/base/efx_mae.c index d36cdc71bea..9ae136dcce6 100644 --- a/drivers/common/sfc_efx/base/efx_mae.c +++ b/drivers/common/sfc_efx/base/efx_mae.c @@ -2937,6 +2937,86 @@ efx_mae_encap_header_alloc( EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +efx_mae_encap_header_update( + __in efx_nic_t *enp, + __in efx_mae_eh_id_t *eh_idp, + __in efx_tunnel_protocol_t encap_type, + __in_bcount(header_size) const uint8_t *header_data, + __in size_t header_size) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, + MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMAX_MCDI2, + MC_CMD_MAE_ENCAP_HEADER_UPDATE_OUT_LEN); + uint32_t encap_type_mcdi; + efx_rc_t rc; + + if (encp->enc_mae_supported == B_FALSE) { + rc = ENOTSUP; + goto fail1; + } + + switch (encap_type) { + case EFX_TUNNEL_PROTOCOL_NONE: + encap_type_mcdi = MAE_MCDI_ENCAP_TYPE_NONE; + break; + case EFX_TUNNEL_PROTOCOL_VXLAN: + encap_type_mcdi = MAE_MCDI_ENCAP_TYPE_VXLAN; + break; + case EFX_TUNNEL_PROTOCOL_GENEVE: + encap_type_mcdi = MAE_MCDI_ENCAP_TYPE_GENEVE; + break; + case EFX_TUNNEL_PROTOCOL_NVGRE: + encap_type_mcdi = MAE_MCDI_ENCAP_TYPE_NVGRE; + break; + default: + rc = ENOTSUP; + goto fail2; + } + + if (header_size > + MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_MAXNUM_MCDI2) { + rc = EINVAL; + goto fail3; + } + + req.emr_cmd = MC_CMD_MAE_ENCAP_HEADER_UPDATE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LEN(header_size); + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_MAE_ENCAP_HEADER_UPDATE_OUT_LEN; + + MCDI_IN_SET_DWORD(req, + MAE_ENCAP_HEADER_UPDATE_IN_EH_ID, eh_idp->id); + + 
MCDI_IN_SET_DWORD(req, + MAE_ENCAP_HEADER_UPDATE_IN_ENCAP_TYPE, encap_type_mcdi); + + memcpy(MCDI_IN2(req, uint8_t, MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA), + header_data, header_size); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail4; + } + + return (0); + fail4: EFSYS_PROBE(fail4); fail3: @@ -4193,4 +4273,179 @@ efx_mae_read_mport_journal( return (rc); } + __checkReturn efx_rc_t +efx_mae_action_set_replay( + __in efx_nic_t *enp, + __in const efx_mae_actions_t *spec_orig, + __out efx_mae_actions_t **spec_clonep) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); + efx_mae_actions_t *spec_clone; + efx_rc_t rc; + + EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec_clone), spec_clone); + if (spec_clone == NULL) { + rc = ENOMEM; + goto fail1; + } + + *spec_clone = *spec_orig; + + spec_clone->ema_rsrc.emar_counter_id.id = EFX_MAE_RSRC_ID_INVALID; + spec_clone->ema_actions &= ~(1U << EFX_MAE_ACTION_COUNT); + spec_clone->ema_n_count_actions = 0; + + (void)efx_mae_mport_invalid(&spec_clone->ema_deliver_mport); + spec_clone->ema_actions &= ~(1U << EFX_MAE_ACTION_DELIVER); + + *spec_clonep = spec_clone; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +efx_mae_action_set_list_alloc( + __in efx_nic_t *enp, + __in unsigned int n_asets, + __in_ecount(n_asets) const efx_mae_aset_id_t *aset_ids, + __out efx_mae_aset_list_id_t *aset_list_idp) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); + EFX_MCDI_DECLARE_BUF(payload, + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2, + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN); + efx_mae_aset_list_id_t aset_list_id; + efx_mcdi_req_t req; + efx_rc_t rc; + + EFX_STATIC_ASSERT(EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES == + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2); + + EFX_STATIC_ASSERT(EFX_MAE_RSRC_ID_INVALID == + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL); + + EFX_STATIC_ASSERT(sizeof (aset_list_idp->id) == + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID_LEN); + + if (encp->enc_mae_supported == B_FALSE) { + rc = ENOTSUP; + goto fail1; + } + + if (MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(n_asets) > + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2) { + rc = EINVAL; + goto fail2; + } + + req.emr_cmd = MC_CMD_MAE_ACTION_SET_LIST_ALLOC; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(n_asets); + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN; + + MCDI_IN_SET_DWORD(req, + MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, n_asets); + + memcpy(MCDI_IN2(req, uint8_t, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS), + aset_ids, n_asets * sizeof (*aset_ids)); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail3; + } + + if (req.emr_out_length_used < MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN) { + rc = EMSGSIZE; + goto fail4; + } + + aset_list_id.id = + MCDI_OUT_DWORD(req, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID); + if (aset_list_id.id == EFX_MAE_RSRC_ID_INVALID) { + rc = ENOENT; + goto fail5; + } + + aset_list_idp->id = aset_list_id.id; + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +efx_mae_action_set_list_free( + __in efx_nic_t *enp, + __in const efx_mae_aset_list_id_t *aset_list_idp) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); + EFX_MCDI_DECLARE_BUF(payload, + 
MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1), + MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1)); + efx_mcdi_req_t req; + efx_rc_t rc; + + if (encp->enc_mae_supported == B_FALSE) { + rc = ENOTSUP; + goto fail1; + } + + req.emr_cmd = MC_CMD_MAE_ACTION_SET_LIST_FREE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1); + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1); + + MCDI_IN_SET_DWORD(req, + MAE_ACTION_SET_LIST_FREE_IN_ASL_ID, aset_list_idp->id); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + if (req.emr_out_length_used < MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMIN) { + rc = EMSGSIZE; + goto fail3; + } + + if (MCDI_OUT_DWORD(req, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) != + aset_list_idp->id) { + /* Firmware failed to free the action set list. */ + rc = EAGAIN; + goto fail4; + } + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + #endif /* EFSYS_OPT_MAE */ diff --git a/drivers/common/sfc_efx/version.map b/drivers/common/sfc_efx/version.map index 40c97ad2b48..b2b90f55125 100644 --- a/drivers/common/sfc_efx/version.map +++ b/drivers/common/sfc_efx/version.map @@ -97,6 +97,8 @@ INTERNAL { efx_mae_action_set_fill_in_src_mac_id; efx_mae_action_set_free; efx_mae_action_set_get_nb_count; + efx_mae_action_set_list_alloc; + efx_mae_action_set_list_free; efx_mae_action_set_populate_count; efx_mae_action_set_populate_decap; efx_mae_action_set_populate_decr_ip_ttl; @@ -111,6 +113,7 @@ INTERNAL { efx_mae_action_set_populate_set_src_mac; efx_mae_action_set_populate_vlan_pop; efx_mae_action_set_populate_vlan_push; + efx_mae_action_set_replay; efx_mae_action_set_spec_fini; efx_mae_action_set_spec_init; efx_mae_action_set_specs_equal; @@ -123,6 +126,7 @@ INTERNAL { efx_mae_counters_stream_stop; efx_mae_encap_header_alloc; efx_mae_encap_header_free; + efx_mae_encap_header_update; efx_mae_fini; efx_mae_get_limits; efx_mae_init; diff --git a/drivers/crypto/bcmfs/hw/bcmfs4_rm.c b/drivers/crypto/bcmfs/hw/bcmfs4_rm.c index 0ccb111898e..9a30c654dab 100644 --- a/drivers/crypto/bcmfs/hw/bcmfs4_rm.c +++ b/drivers/crypto/bcmfs/hw/bcmfs4_rm.c @@ -473,7 +473,7 @@ bcmfs4_enqueue_single_request_qp(struct bcmfs_qp *qp, void *op) return -ERANGE; } - reqid = pos + __builtin_ctzll(slab); + reqid = pos + rte_ctz64(slab); rte_bitmap_clear(qp->ctx_bmp, reqid); qp->ctx_pool[reqid] = (unsigned long)msg; diff --git a/drivers/crypto/bcmfs/hw/bcmfs5_rm.c b/drivers/crypto/bcmfs/hw/bcmfs5_rm.c index c677c0cd9b5..cbfe42cb471 100644 --- a/drivers/crypto/bcmfs/hw/bcmfs5_rm.c +++ b/drivers/crypto/bcmfs/hw/bcmfs5_rm.c @@ -404,7 +404,7 @@ bcmfs5_enqueue_single_request_qp(struct bcmfs_qp *qp, void *op) return -ERANGE; } - reqid = pos + __builtin_ctzll(slab); + reqid = pos + rte_ctz64(slab); rte_bitmap_clear(qp->ctx_bmp, reqid); qp->ctx_pool[reqid] = (unsigned long)msg; diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c index e405a2ad9ff..5f181e8839f 100644 --- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c +++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c @@ -414,6 +414,8 @@ cn10k_cpt_vec_inst_fill(struct vec_request *vec_req, struct cpt_inst_s *inst, inst->w2.u64 = vec_req->w2; inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req); inst->w4.u64 = w4.u64; + inst->w5.u64 = 0; + inst->w6.u64 = 0; inst->w7.u64 = w7.u64; } @@ -1064,6 +1066,461 @@ 
cn10k_cpt_dev_info_get(struct rte_cryptodev *dev, } } +static inline int +cn10k_cpt_raw_fill_inst(struct cnxk_iov *iov, struct cnxk_cpt_qp *qp, + struct cnxk_sym_dp_ctx *dp_ctx, struct cpt_inst_s inst[], + struct cpt_inflight_req *infl_req, void *opaque, const bool is_sg_ver2) +{ + struct cnxk_se_sess *sess; + int ret; + + const union cpt_res_s res = { + .cn10k.compcode = CPT_COMP_NOT_DONE, + }; + + inst[0].w0.u64 = 0; + inst[0].w2.u64 = 0; + inst[0].w3.u64 = 0; + + sess = dp_ctx->sess; + + switch (sess->dp_thr_type) { + case CPT_DP_THREAD_TYPE_PT: + ret = fill_raw_passthrough_params(iov, inst); + break; + case CPT_DP_THREAD_TYPE_FC_CHAIN: + ret = fill_raw_fc_params(iov, sess, &qp->meta_info, infl_req, &inst[0], false, + false, is_sg_ver2); + break; + case CPT_DP_THREAD_TYPE_FC_AEAD: + ret = fill_raw_fc_params(iov, sess, &qp->meta_info, infl_req, &inst[0], false, true, + is_sg_ver2); + break; + case CPT_DP_THREAD_AUTH_ONLY: + ret = fill_raw_digest_params(iov, sess, &qp->meta_info, infl_req, &inst[0], + is_sg_ver2); + break; + default: + ret = -EINVAL; + } + + if (unlikely(ret)) + return 0; + + inst[0].res_addr = (uint64_t)&infl_req->res; + __atomic_store_n(&infl_req->res.u64[0], res.u64[0], __ATOMIC_RELAXED); + infl_req->opaque = opaque; + + inst[0].w7.u64 = sess->cpt_inst_w7; + + return 1; +} + +static uint32_t +cn10k_cpt_raw_enqueue_burst(void *qpair, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec, + union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status, + const bool is_sgv2) +{ + uint16_t lmt_id, nb_allowed, nb_ops = vec->num; + uint64_t lmt_base, lmt_arg, io_addr, head; + struct cpt_inflight_req *infl_req; + struct cnxk_cpt_qp *qp = qpair; + struct cnxk_sym_dp_ctx *dp_ctx; + struct pending_queue *pend_q; + uint32_t count = 0, index; + union cpt_fc_write_s fc; + struct cpt_inst_s *inst; + uint64_t *fc_addr; + int ret, i; + + pend_q = &qp->pend_q; + const uint64_t pq_mask = pend_q->pq_mask; + + head = pend_q->head; + nb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask); + nb_ops = RTE_MIN(nb_ops, nb_allowed); + + if (unlikely(nb_ops == 0)) + return 0; + + lmt_base = qp->lmtline.lmt_base; + io_addr = qp->lmtline.io_addr; + fc_addr = qp->lmtline.fc_addr; + + const uint32_t fc_thresh = qp->lmtline.fc_thresh; + + ROC_LMT_BASE_ID_GET(lmt_base, lmt_id); + inst = (struct cpt_inst_s *)lmt_base; + + dp_ctx = (struct cnxk_sym_dp_ctx *)drv_ctx; +again: + fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED); + if (unlikely(fc.s.qsize > fc_thresh)) { + i = 0; + goto pend_q_commit; + } + + for (i = 0; i < RTE_MIN(PKTS_PER_LOOP, nb_ops); i++) { + struct cnxk_iov iov; + + index = count + i; + infl_req = &pend_q->req_queue[head]; + infl_req->op_flags = 0; + + cnxk_raw_burst_to_iov(vec, &ofs, index, &iov); + ret = cn10k_cpt_raw_fill_inst(&iov, qp, dp_ctx, &inst[2 * i], infl_req, + user_data[index], is_sgv2); + if (unlikely(ret != 1)) { + plt_dp_err("Could not process vec: %d", index); + if (i == 0 && count == 0) + return -1; + else if (i == 0) + goto pend_q_commit; + else + break; + } + pending_queue_advance(&head, pq_mask); + } + + if (i > PKTS_PER_STEORL) { + lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 | (uint64_t)lmt_id; + roc_lmt_submit_steorl(lmt_arg, io_addr); + lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - PKTS_PER_STEORL - 1) << 12 | + (uint64_t)(lmt_id + PKTS_PER_STEORL); + roc_lmt_submit_steorl(lmt_arg, io_addr); + } else { + lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 | (uint64_t)lmt_id; + roc_lmt_submit_steorl(lmt_arg, io_addr); + } + + 
rte_io_wmb(); + + if (nb_ops - i > 0 && i == PKTS_PER_LOOP) { + nb_ops -= i; + count += i; + goto again; + } + +pend_q_commit: + rte_atomic_thread_fence(__ATOMIC_RELEASE); + + pend_q->head = head; + pend_q->time_out = rte_get_timer_cycles() + DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz(); + + *enqueue_status = 1; + return count + i; +} + +static uint32_t +cn10k_cpt_raw_enqueue_burst_sgv2(void *qpair, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec, + union rte_crypto_sym_ofs ofs, void *user_data[], + int *enqueue_status) +{ + return cn10k_cpt_raw_enqueue_burst(qpair, drv_ctx, vec, ofs, user_data, enqueue_status, + true); +} + +static uint32_t +cn10k_cpt_raw_enqueue_burst_sgv1(void *qpair, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec, + union rte_crypto_sym_ofs ofs, void *user_data[], + int *enqueue_status) +{ + return cn10k_cpt_raw_enqueue_burst(qpair, drv_ctx, vec, ofs, user_data, enqueue_status, + false); +} + +static int +cn10k_cpt_raw_enqueue(void *qpair, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec, + uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad_or_auth_iv, void *user_data, + const bool is_sgv2) +{ + uint64_t lmt_base, lmt_arg, io_addr, head; + struct cpt_inflight_req *infl_req; + struct cnxk_cpt_qp *qp = qpair; + struct cnxk_sym_dp_ctx *dp_ctx; + uint16_t lmt_id, nb_allowed; + struct cpt_inst_s *inst; + union cpt_fc_write_s fc; + struct cnxk_iov iov; + uint64_t *fc_addr; + int ret; + + struct pending_queue *pend_q = &qp->pend_q; + const uint64_t pq_mask = pend_q->pq_mask; + const uint32_t fc_thresh = qp->lmtline.fc_thresh; + + head = pend_q->head; + nb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask); + + if (unlikely(nb_allowed == 0)) + return -1; + + cnxk_raw_to_iov(data_vec, n_data_vecs, &ofs, iv, digest, aad_or_auth_iv, &iov); + + lmt_base = qp->lmtline.lmt_base; + io_addr = qp->lmtline.io_addr; + fc_addr = qp->lmtline.fc_addr; + + ROC_LMT_BASE_ID_GET(lmt_base, lmt_id); + inst = (struct cpt_inst_s *)lmt_base; + + fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED); + if (unlikely(fc.s.qsize > fc_thresh)) + return -1; + + dp_ctx = (struct cnxk_sym_dp_ctx *)drv_ctx; + infl_req = &pend_q->req_queue[head]; + infl_req->op_flags = 0; + + ret = cn10k_cpt_raw_fill_inst(&iov, qp, dp_ctx, &inst[0], infl_req, user_data, is_sgv2); + if (unlikely(ret != 1)) { + plt_dp_err("Could not process vec"); + return -1; + } + + pending_queue_advance(&head, pq_mask); + + lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id; + roc_lmt_submit_steorl(lmt_arg, io_addr); + + rte_io_wmb(); + + pend_q->head = head; + pend_q->time_out = rte_get_timer_cycles() + DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz(); + + return 1; +} + +static int +cn10k_cpt_raw_enqueue_sgv2(void *qpair, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec, + uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad_or_auth_iv, void *user_data) +{ + return cn10k_cpt_raw_enqueue(qpair, drv_ctx, data_vec, n_data_vecs, ofs, iv, digest, + aad_or_auth_iv, user_data, true); +} + +static int +cn10k_cpt_raw_enqueue_sgv1(void *qpair, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec, + uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad_or_auth_iv, void *user_data) +{ + return 
cn10k_cpt_raw_enqueue(qpair, drv_ctx, data_vec, n_data_vecs, ofs, iv, digest, + aad_or_auth_iv, user_data, false); +} + +static inline int +cn10k_cpt_raw_dequeue_post_process(struct cpt_cn10k_res_s *res) +{ + const uint8_t uc_compcode = res->uc_compcode; + const uint8_t compcode = res->compcode; + int ret = 1; + + if (likely(compcode == CPT_COMP_GOOD)) { + if (unlikely(uc_compcode)) + plt_dp_info("Request failed with microcode error: 0x%x", res->uc_compcode); + else + ret = 0; + } + + return ret; +} + +static uint32_t +cn10k_cpt_sym_raw_dequeue_burst(void *qptr, uint8_t *drv_ctx, + rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, + uint32_t max_nb_to_dequeue, + rte_cryptodev_raw_post_dequeue_t post_dequeue, void **out_user_data, + uint8_t is_user_data_array, uint32_t *n_success, + int *dequeue_status) +{ + struct cpt_inflight_req *infl_req; + struct cnxk_cpt_qp *qp = qptr; + struct pending_queue *pend_q; + uint64_t infl_cnt, pq_tail; + union cpt_res_s res; + int is_op_success; + uint16_t nb_ops; + void *opaque; + int i = 0; + + pend_q = &qp->pend_q; + + const uint64_t pq_mask = pend_q->pq_mask; + + RTE_SET_USED(drv_ctx); + pq_tail = pend_q->tail; + infl_cnt = pending_queue_infl_cnt(pend_q->head, pq_tail, pq_mask); + + /* Ensure infl_cnt isn't read before data lands */ + rte_atomic_thread_fence(__ATOMIC_ACQUIRE); + + infl_req = &pend_q->req_queue[pq_tail]; + + opaque = infl_req->opaque; + if (get_dequeue_count) + nb_ops = get_dequeue_count(opaque); + else + nb_ops = max_nb_to_dequeue; + nb_ops = RTE_MIN(nb_ops, infl_cnt); + + for (i = 0; i < nb_ops; i++) { + is_op_success = 0; + infl_req = &pend_q->req_queue[pq_tail]; + + res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED); + + if (unlikely(res.cn10k.compcode == CPT_COMP_NOT_DONE)) { + if (unlikely(rte_get_timer_cycles() > pend_q->time_out)) { + plt_err("Request timed out"); + cnxk_cpt_dump_on_err(qp); + pend_q->time_out = rte_get_timer_cycles() + + DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz(); + } + break; + } + + pending_queue_advance(&pq_tail, pq_mask); + + if (!cn10k_cpt_raw_dequeue_post_process(&res.cn10k)) { + is_op_success = 1; + *n_success += 1; + } + + if (is_user_data_array) { + out_user_data[i] = infl_req->opaque; + post_dequeue(out_user_data[i], i, is_op_success); + } else { + if (i == 0) + out_user_data[0] = opaque; + post_dequeue(out_user_data[0], i, is_op_success); + } + + if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF)) + rte_mempool_put(qp->meta_info.pool, infl_req->mdata); + } + + pend_q->tail = pq_tail; + *dequeue_status = 1; + + return i; +} + +static void * +cn10k_cpt_sym_raw_dequeue(void *qptr, uint8_t *drv_ctx, int *dequeue_status, + enum rte_crypto_op_status *op_status) +{ + struct cpt_inflight_req *infl_req; + struct cnxk_cpt_qp *qp = qptr; + struct pending_queue *pend_q; + uint64_t pq_tail; + union cpt_res_s res; + void *opaque = NULL; + + pend_q = &qp->pend_q; + + const uint64_t pq_mask = pend_q->pq_mask; + + RTE_SET_USED(drv_ctx); + + pq_tail = pend_q->tail; + + rte_atomic_thread_fence(__ATOMIC_ACQUIRE); + + infl_req = &pend_q->req_queue[pq_tail]; + + res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED); + + if (unlikely(res.cn10k.compcode == CPT_COMP_NOT_DONE)) { + if (unlikely(rte_get_timer_cycles() > pend_q->time_out)) { + plt_err("Request timed out"); + cnxk_cpt_dump_on_err(qp); + pend_q->time_out = rte_get_timer_cycles() + + DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz(); + } + goto exit; + } + + pending_queue_advance(&pq_tail, pq_mask); + + opaque = 
infl_req->opaque; + + if (!cn10k_cpt_raw_dequeue_post_process(&res.cn10k)) + *op_status = RTE_CRYPTO_OP_STATUS_SUCCESS; + else + *op_status = RTE_CRYPTO_OP_STATUS_ERROR; + + if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF)) + rte_mempool_put(qp->meta_info.pool, infl_req->mdata); + + *dequeue_status = 1; +exit: + return opaque; +} + +static int +cn10k_sym_get_raw_dp_ctx_size(struct rte_cryptodev *dev __rte_unused) +{ + return sizeof(struct cnxk_sym_dp_ctx); +} + +static int +cn10k_sym_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id, + struct rte_crypto_raw_dp_ctx *raw_dp_ctx, + enum rte_crypto_op_sess_type sess_type, + union rte_cryptodev_session_ctx session_ctx, uint8_t is_update) +{ + struct cnxk_se_sess *sess = (struct cnxk_se_sess *)session_ctx.crypto_sess; + struct cnxk_sym_dp_ctx *dp_ctx; + + if (sess_type != RTE_CRYPTO_OP_WITH_SESSION) + return -ENOTSUP; + + if (sess == NULL) + return -EINVAL; + + if ((sess->dp_thr_type == CPT_DP_THREAD_TYPE_PDCP) || + (sess->dp_thr_type == CPT_DP_THREAD_TYPE_PDCP_CHAIN) || + (sess->dp_thr_type == CPT_DP_THREAD_TYPE_KASUMI) || + (sess->dp_thr_type == CPT_DP_THREAD_TYPE_SM)) + return -ENOTSUP; + + if ((sess->dp_thr_type == CPT_DP_THREAD_AUTH_ONLY) && + ((sess->roc_se_ctx.fc_type == ROC_SE_KASUMI) || + (sess->roc_se_ctx.fc_type == ROC_SE_PDCP))) + return -ENOTSUP; + + if (sess->roc_se_ctx.hash_type == ROC_SE_SHA1_TYPE) + return -ENOTSUP; + + dp_ctx = (struct cnxk_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data; + dp_ctx->sess = sess; + + if (!is_update) { + struct cnxk_cpt_vf *vf; + + raw_dp_ctx->qp_data = (struct cnxk_cpt_qp *)dev->data->queue_pairs[qp_id]; + raw_dp_ctx->dequeue = cn10k_cpt_sym_raw_dequeue; + raw_dp_ctx->dequeue_burst = cn10k_cpt_sym_raw_dequeue_burst; + + vf = dev->data->dev_private; + if (vf->cpt.hw_caps[CPT_ENG_TYPE_SE].sg_ver2 && + vf->cpt.hw_caps[CPT_ENG_TYPE_IE].sg_ver2) { + raw_dp_ctx->enqueue = cn10k_cpt_raw_enqueue_sgv2; + raw_dp_ctx->enqueue_burst = cn10k_cpt_raw_enqueue_burst_sgv2; + } else { + raw_dp_ctx->enqueue = cn10k_cpt_raw_enqueue_sgv1; + raw_dp_ctx->enqueue_burst = cn10k_cpt_raw_enqueue_burst_sgv1; + } + } + + return 0; +} + struct rte_cryptodev_ops cn10k_cpt_ops = { /* Device control ops */ .dev_configure = cnxk_cpt_dev_config, @@ -1090,4 +1547,8 @@ struct rte_cryptodev_ops cn10k_cpt_ops = { /* Event crypto ops */ .session_ev_mdata_set = cn10k_cpt_crypto_adapter_ev_mdata_set, .queue_pair_event_error_query = cnxk_cpt_queue_pair_event_error_query, + + /* Raw data-path API related operations */ + .sym_get_raw_dp_ctx_size = cn10k_sym_get_raw_dp_ctx_size, + .sym_configure_raw_dp_ctx = cn10k_sym_configure_raw_dp_ctx, }; diff --git a/drivers/crypto/cnxk/cnxk_ae.h b/drivers/crypto/cnxk/cnxk_ae.h index 7ad259b7f48..09468d58b01 100644 --- a/drivers/crypto/cnxk/cnxk_ae.h +++ b/drivers/crypto/cnxk/cnxk_ae.h @@ -193,11 +193,29 @@ cnxk_ae_fill_ec_params(struct cnxk_ae_sess *sess, case RTE_CRYPTO_EC_GROUP_SECP521R1: ec->curveid = ROC_AE_EC_ID_P521; break; + case RTE_CRYPTO_EC_GROUP_SM2: + ec->curveid = ROC_AE_EC_ID_SM2; + break; default: - /* Only NIST curves (FIPS 186-4) are supported */ + /* Only NIST curves (FIPS 186-4) and SM2 are supported */ return -EINVAL; } + if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_ECPM) + return 0; + + ec->pkey.length = xform->ec.pkey.length; + if (xform->ec.pkey.length) + rte_memcpy(ec->pkey.data, xform->ec.pkey.data, xform->ec.pkey.length); + + ec->q.x.length = xform->ec.q.x.length; + if (xform->ec.q.x.length) + rte_memcpy(ec->q.x.data, xform->ec.q.x.data, xform->ec.q.x.length); + + 
ec->q.y.length = xform->ec.q.y.length; + if (xform->ec.q.y.length) + rte_memcpy(ec->q.y.data, xform->ec.q.y.data, xform->ec.q.y.length); + return 0; } @@ -220,6 +238,7 @@ cnxk_ae_fill_session_parameters(struct cnxk_ae_sess *sess, /* Fall through */ case RTE_CRYPTO_ASYM_XFORM_ECPM: case RTE_CRYPTO_ASYM_XFORM_ECFPM: + case RTE_CRYPTO_ASYM_XFORM_SM2: ret = cnxk_ae_fill_ec_params(sess, xform); break; default: @@ -502,10 +521,11 @@ static __rte_always_inline void cnxk_ae_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa, struct roc_ae_buf_ptr *meta_buf, uint64_t fpm_table_iova, struct roc_ae_ec_group *ec_grp, - uint8_t curveid, struct cpt_inst_s *inst) + struct cnxk_ae_sess *sess, struct cpt_inst_s *inst) { uint16_t message_len = ecdsa->message.length; - uint16_t pkey_len = ecdsa->pkey.length; + uint16_t pkey_len = sess->ec_ctx.pkey.length; + uint8_t curveid = sess->ec_ctx.curveid; uint16_t p_align, k_align, m_align; uint16_t k_len = ecdsa->k.length; uint16_t order_len, prime_len; @@ -527,7 +547,7 @@ cnxk_ae_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa, /* Set write offset for order and private key */ o_offset = prime_len - order_len; - pk_offset = prime_len - pkey_len; + pk_offset = p_align - pkey_len; /* Input buffer */ dptr = meta_buf->vaddr; @@ -555,7 +575,7 @@ cnxk_ae_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa, memcpy(dptr + o_offset, ec_grp->order.data, order_len); dptr += p_align; - memcpy(dptr + pk_offset, ecdsa->pkey.data, pkey_len); + memcpy(dptr + pk_offset, sess->ec_ctx.pkey.data, pkey_len); dptr += p_align; memcpy(dptr, ecdsa->message.data, message_len); @@ -572,7 +592,7 @@ cnxk_ae_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa, w4.s.opcode_minor = ROC_AE_MINOR_OP_ECDSA_SIGN; w4.s.param1 = curveid | (message_len << 8); - w4.s.param2 = (pkey_len << 8) | k_len; + w4.s.param2 = (p_align << 8) | k_len; w4.s.dlen = dlen; inst->w4.u64 = w4.u64; @@ -583,13 +603,14 @@ static __rte_always_inline void cnxk_ae_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa, struct roc_ae_buf_ptr *meta_buf, uint64_t fpm_table_iova, - struct roc_ae_ec_group *ec_grp, uint8_t curveid, + struct roc_ae_ec_group *ec_grp, struct cnxk_ae_sess *sess, struct cpt_inst_s *inst) { uint32_t message_len = ecdsa->message.length; + uint16_t qx_len = sess->ec_ctx.q.x.length; + uint16_t qy_len = sess->ec_ctx.q.y.length; + uint8_t curveid = sess->ec_ctx.curveid; uint16_t o_offset, r_offset, s_offset; - uint16_t qx_len = ecdsa->q.x.length; - uint16_t qy_len = ecdsa->q.y.length; uint16_t r_len = ecdsa->r.length; uint16_t s_len = ecdsa->s.length; uint16_t order_len, prime_len; @@ -649,10 +670,10 @@ cnxk_ae_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa, memcpy(dptr, ec_grp->prime.data, prime_len); dptr += p_align; - memcpy(dptr + qx_offset, ecdsa->q.x.data, qx_len); + memcpy(dptr + qx_offset, sess->ec_ctx.q.x.data, qx_len); dptr += p_align; - memcpy(dptr + qy_offset, ecdsa->q.y.data, qy_len); + memcpy(dptr + qy_offset, sess->ec_ctx.q.y.data, qy_len); dptr += p_align; memcpy(dptr, ec_grp->consta.data, prime_len); @@ -685,10 +706,208 @@ cnxk_ae_enqueue_ecdsa_op(struct rte_crypto_op *op, if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_SIGN) cnxk_ae_ecdsa_sign_prep(ecdsa, meta_buf, fpm_iova[curveid], - ec_grp[curveid], curveid, inst); + ec_grp[curveid], sess, inst); else if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY) cnxk_ae_ecdsa_verify_prep(ecdsa, meta_buf, fpm_iova[curveid], - ec_grp[curveid], curveid, inst); + ec_grp[curveid], sess, inst); + else { + op->status = 
RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + return 0; +} + +static __rte_always_inline void +cnxk_ae_sm2_sign_prep(struct rte_crypto_sm2_op_param *sm2, + struct roc_ae_buf_ptr *meta_buf, + uint64_t fpm_table_iova, struct roc_ae_ec_group *ec_grp, + struct cnxk_ae_sess *sess, struct cpt_inst_s *inst) +{ + uint16_t message_len = sm2->message.length; + uint16_t pkey_len = sess->ec_ctx.pkey.length; + uint16_t p_align, k_align, m_align; + uint16_t k_len = sm2->k.length; + uint16_t order_len, prime_len; + uint16_t o_offset, pk_offset; + union cpt_inst_w4 w4; + uint16_t dlen; + uint8_t *dptr; + + prime_len = ec_grp->prime.length; + order_len = ec_grp->order.length; + + /* Truncate input length to curve prime length */ + if (message_len > prime_len) + message_len = prime_len; + m_align = RTE_ALIGN_CEIL(message_len, 8); + + p_align = RTE_ALIGN_CEIL(prime_len, 8); + k_align = RTE_ALIGN_CEIL(k_len, 8); + + /* Set write offset for order and private key */ + o_offset = prime_len - order_len; + pk_offset = p_align - pkey_len; + + /* Input buffer */ + dptr = meta_buf->vaddr; + inst->dptr = (uintptr_t)dptr; + + /* + * Set dlen = sum(sizeof(fpm address), ROUNDUP8(scalar len, input len), + * ROUNDUP8(priv key len, prime len, order len)). + * Please note, private key, order cannot exceed prime + * length i.e 3 * p_align. + */ + dlen = sizeof(fpm_table_iova) + k_align + m_align + p_align * 5; + + memset(dptr, 0, dlen); + + *(uint64_t *)dptr = fpm_table_iova; + dptr += sizeof(fpm_table_iova); + + rte_memcpy(dptr, sm2->k.data, k_len); + dptr += k_align; + + rte_memcpy(dptr, ec_grp->prime.data, prime_len); + dptr += p_align; + + rte_memcpy(dptr + o_offset, ec_grp->order.data, order_len); + dptr += p_align; + + rte_memcpy(dptr + pk_offset, sess->ec_ctx.pkey.data, pkey_len); + dptr += p_align; + + rte_memcpy(dptr, sm2->message.data, message_len); + dptr += m_align; + + rte_memcpy(dptr, ec_grp->consta.data, prime_len); + dptr += p_align; + + rte_memcpy(dptr, ec_grp->constb.data, prime_len); + dptr += p_align; + + /* Setup opcodes */ + w4.s.opcode_major = ROC_AE_MAJOR_OP_ECDSA; + w4.s.opcode_minor = ROC_AE_MINOR_OP_ECDSA_SIGN; + + w4.s.param1 = 2 | 1 << 7 | 1 << 6 | (message_len << 8); + w4.s.param2 = (p_align << 8) | k_len; + w4.s.dlen = dlen; + + inst->w4.u64 = w4.u64; + inst->rptr = (uintptr_t)dptr; +} + +static __rte_always_inline void +cnxk_ae_sm2_verify_prep(struct rte_crypto_sm2_op_param *sm2, + struct roc_ae_buf_ptr *meta_buf, + uint64_t fpm_table_iova, + struct roc_ae_ec_group *ec_grp, struct cnxk_ae_sess *sess, + struct cpt_inst_s *inst) +{ + uint32_t message_len = sm2->message.length; + uint16_t o_offset, r_offset, s_offset; + uint16_t qx_len = sess->ec_ctx.q.x.length; + uint16_t qy_len = sess->ec_ctx.q.y.length; + uint16_t r_len = sm2->r.length; + uint16_t s_len = sm2->s.length; + uint16_t order_len, prime_len; + uint16_t qx_offset, qy_offset; + uint16_t p_align, m_align; + union cpt_inst_w4 w4; + uint16_t dlen; + uint8_t *dptr; + + prime_len = ec_grp->prime.length; + order_len = ec_grp->order.length; + + /* Truncate input length to curve prime length */ + if (message_len > prime_len) + message_len = prime_len; + + m_align = RTE_ALIGN_CEIL(message_len, 8); + p_align = RTE_ALIGN_CEIL(prime_len, 8); + + /* Set write offset for sign, order and public key coordinates */ + o_offset = prime_len - order_len; + qx_offset = prime_len - qx_len; + qy_offset = prime_len - qy_len; + r_offset = prime_len - r_len; + s_offset = prime_len - s_len; + + /* Input buffer */ + dptr = meta_buf->vaddr; + 
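The sign path above lays the operands out back to back in the meta buffer, each slot rounded up to 8 bytes, with order and private key right-aligned inside a prime-sized slot. A standalone sketch of that sizing arithmetic (values are illustrative; it mirrors the driver math rather than adding anything new):

#include <stdint.h>
#include <stdio.h>

#define ALIGN8(x) (((x) + 7u) & ~7u)

/* Sign-input sizing as used above: an 8-byte FPM table pointer, the scalar
 * k, the (truncated) message, and five prime-sized slots (prime, order,
 * private key, const-a, const-b), each rounded up to 8 bytes.  Order and
 * private key are right-aligned in their slots, which is where
 * o_offset/pk_offset come from.
 */
void
sm2_sign_layout(uint16_t prime_len, uint16_t order_len, uint16_t pkey_len,
		uint16_t k_len, uint16_t msg_len)
{
	uint16_t p_align = ALIGN8(prime_len);
	uint16_t k_align = ALIGN8(k_len);
	uint16_t m_align = ALIGN8(msg_len > prime_len ? prime_len : msg_len);
	uint16_t o_offset = prime_len - order_len;  /* pad before order */
	uint16_t pk_offset = p_align - pkey_len;    /* pad before private key */
	uint16_t dlen = sizeof(uint64_t) + k_align + m_align + p_align * 5;

	printf("o_offset=%u pk_offset=%u dlen=%u\n", o_offset, pk_offset, dlen);
}

/* For the SM2 curve all operands are 32 bytes, so
 * sm2_sign_layout(32, 32, 32, 32, 32) gives dlen = 8 + 32 + 32 + 5*32 = 232. */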
inst->dptr = (uintptr_t)dptr; + + /* + * Set dlen = sum(sizeof(fpm address), ROUNDUP8(message len), + * ROUNDUP8(sign len(r and s), public key len(x and y coordinates), + * prime len, order len)). + * Please note sign, public key and order can not exceed prime length + * i.e. 6 * p_align + */ + dlen = sizeof(fpm_table_iova) + m_align + (8 * p_align); + + memset(dptr, 0, dlen); + + *(uint64_t *)dptr = fpm_table_iova; + dptr += sizeof(fpm_table_iova); + + rte_memcpy(dptr + r_offset, sm2->r.data, r_len); + dptr += p_align; + + rte_memcpy(dptr + s_offset, sm2->s.data, s_len); + dptr += p_align; + + rte_memcpy(dptr, sm2->message.data, message_len); + dptr += m_align; + + rte_memcpy(dptr + o_offset, ec_grp->order.data, order_len); + dptr += p_align; + + rte_memcpy(dptr, ec_grp->prime.data, prime_len); + dptr += p_align; + + rte_memcpy(dptr + qx_offset, sess->ec_ctx.q.x.data, qx_len); + dptr += p_align; + + rte_memcpy(dptr + qy_offset, sess->ec_ctx.q.y.data, qy_len); + dptr += p_align; + + rte_memcpy(dptr, ec_grp->consta.data, prime_len); + dptr += p_align; + + rte_memcpy(dptr, ec_grp->constb.data, prime_len); + dptr += p_align; + + /* Setup opcodes */ + w4.s.opcode_major = ROC_AE_MAJOR_OP_ECDSA; + w4.s.opcode_minor = ROC_AE_MINOR_OP_ECDSA_VERIFY; + + w4.s.param1 = 2 | 1 << 7 | 1 << 6 | (message_len << 8); + w4.s.param2 = 0; + w4.s.dlen = dlen; + + inst->w4.u64 = w4.u64; + inst->rptr = (uintptr_t)dptr; +} + +static __rte_always_inline int __rte_hot +cnxk_ae_enqueue_sm2_op(struct rte_crypto_op *op, + struct roc_ae_buf_ptr *meta_buf, + struct cnxk_ae_sess *sess, uint64_t *fpm_iova, + struct roc_ae_ec_group **ec_grp, + struct cpt_inst_s *inst) +{ + struct rte_crypto_sm2_op_param *sm2 = &op->asym->sm2; + uint8_t curveid = sess->ec_ctx.curveid; + + if (sm2->op_type == RTE_CRYPTO_ASYM_OP_SIGN) + cnxk_ae_sm2_sign_prep(sm2, meta_buf, fpm_iova[curveid], + ec_grp[curveid], sess, inst); + else if (sm2->op_type == RTE_CRYPTO_ASYM_OP_VERIFY) + cnxk_ae_sm2_verify_prep(sm2, meta_buf, fpm_iova[curveid], + ec_grp[curveid], sess, inst); else { op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; return -EINVAL; @@ -898,6 +1117,23 @@ cnxk_ae_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa, uint8_t *rptr, ecdsa->s.length = prime_len; } +static __rte_always_inline void +cnxk_ae_dequeue_sm2_op(struct rte_crypto_sm2_op_param *sm2, uint8_t *rptr, + struct roc_ae_ec_ctx *ec, + struct roc_ae_ec_group **ec_grp) +{ + int prime_len = ec_grp[ec->curveid]->prime.length; + + if (sm2->op_type == RTE_CRYPTO_ASYM_OP_VERIFY) + return; + + /* Separate out sign r and s components */ + rte_memcpy(sm2->r.data, rptr, prime_len); + rte_memcpy(sm2->s.data, rptr + RTE_ALIGN_CEIL(prime_len, 8), prime_len); + sm2->r.length = prime_len; + sm2->s.length = prime_len; +} + static __rte_always_inline void cnxk_ae_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm, uint8_t *rptr, struct roc_ae_ec_ctx *ec, @@ -966,6 +1202,13 @@ cnxk_ae_enqueue(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op, if (unlikely(ret)) goto req_fail; break; + case RTE_CRYPTO_ASYM_XFORM_SM2: + ret = cnxk_ae_enqueue_sm2_op(op, &meta_buf, sess, + sess->cnxk_fpm_iova, + sess->ec_grp, inst); + if (unlikely(ret)) + goto req_fail; + break; case RTE_CRYPTO_ASYM_XFORM_ECPM: ret = cnxk_ae_ecpm_prep(&asym_op->ecpm, &meta_buf, sess->ec_grp[sess->ec_ctx.curveid], @@ -1015,6 +1258,10 @@ cnxk_ae_post_process(struct rte_crypto_op *cop, struct cnxk_ae_sess *sess, cnxk_ae_dequeue_ecdsa_op(&op->ecdsa, rptr, &sess->ec_ctx, sess->ec_grp); break; + case RTE_CRYPTO_ASYM_XFORM_SM2: + 
cnxk_ae_dequeue_sm2_op(&op->sm2, rptr, &sess->ec_ctx, + sess->ec_grp); + break; case RTE_CRYPTO_ASYM_XFORM_ECPM: case RTE_CRYPTO_ASYM_XFORM_ECFPM: cnxk_ae_dequeue_ecpm_op(&op->ecpm, rptr, &sess->ec_ctx, diff --git a/drivers/crypto/cnxk/cnxk_cryptodev.c b/drivers/crypto/cnxk/cnxk_cryptodev.c index 4fa1907cea4..4819a141842 100644 --- a/drivers/crypto/cnxk/cnxk_cryptodev.c +++ b/drivers/crypto/cnxk/cnxk_cryptodev.c @@ -13,22 +13,16 @@ uint64_t cnxk_cpt_default_ff_get(void) { - uint64_t ff = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO | - RTE_CRYPTODEV_FF_HW_ACCELERATED | - RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT | + uint64_t ff = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO | + RTE_CRYPTODEV_FF_HW_ACCELERATED | RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT | RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP | - RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | - RTE_CRYPTODEV_FF_IN_PLACE_SGL | - RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | - RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | - RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | - RTE_CRYPTODEV_FF_SYM_SESSIONLESS | - RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED | - RTE_CRYPTODEV_FF_SECURITY; + RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | RTE_CRYPTODEV_FF_IN_PLACE_SGL | + RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | + RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | RTE_CRYPTODEV_FF_SYM_SESSIONLESS | + RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED | RTE_CRYPTODEV_FF_SECURITY; if (roc_model_is_cn10k()) - ff |= RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM; + ff |= RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM | RTE_CRYPTODEV_FF_SYM_RAW_DP; return ff; } diff --git a/drivers/crypto/cnxk/cnxk_cryptodev.h b/drivers/crypto/cnxk/cnxk_cryptodev.h index 09f5ba0650c..9a321aa8c92 100644 --- a/drivers/crypto/cnxk/cnxk_cryptodev.h +++ b/drivers/crypto/cnxk/cnxk_cryptodev.h @@ -13,7 +13,7 @@ #define CNXK_CPT_MAX_CAPS 54 #define CNXK_SEC_CRYPTO_MAX_CAPS 16 #define CNXK_SEC_MAX_CAPS 9 -#define CNXK_AE_EC_ID_MAX 8 +#define CNXK_AE_EC_ID_MAX 9 /** * Device private data */ diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c index 4c6357353e6..b4864f66bfe 100644 --- a/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c +++ b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c @@ -1152,6 +1152,20 @@ static const struct rte_cryptodev_capabilities caps_sm4[] = { }, }; +static const struct rte_cryptodev_capabilities caps_sm2[] = { + { /* SM2 */ + .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC, + {.asym = { + .xform_capa = { + .xform_type = RTE_CRYPTO_ASYM_XFORM_SM2, + .op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) | + (1 << RTE_CRYPTO_ASYM_OP_VERIFY)) + } + } + } + } +}; + static const struct rte_cryptodev_capabilities caps_end[] = { RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() }; @@ -1180,8 +1194,8 @@ static const struct rte_cryptodev_capabilities sec_caps_aes[] = { .increment = 4 }, .iv_size = { - .min = 11, - .max = 11, + .min = 12, + .max = 12, .increment = 0 } }, } @@ -1210,8 +1224,8 @@ static const struct rte_cryptodev_capabilities sec_caps_aes[] = { .increment = 4 }, .iv_size = { - .min = 12, - .max = 12, + .min = 11, + .max = 11, .increment = 0 } }, } @@ -1619,10 +1633,13 @@ static void cn10k_crypto_caps_add(struct rte_cryptodev_capabilities cnxk_caps[], union cpt_eng_caps *hw_caps, int *cur_pos) { - if (hw_caps->sg_ver2) { + if (hw_caps[CPT_ENG_TYPE_SE].sg_ver2) { CPT_CAPS_ADD(cnxk_caps, cur_pos, hw_caps, sm3); CPT_CAPS_ADD(cnxk_caps, cur_pos, hw_caps, sm4); } + + if (hw_caps[CPT_ENG_TYPE_AE].sm2) + CPT_CAPS_ADD(cnxk_caps, cur_pos, hw_caps, 
sm2); } static void diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c index 50150d3f066..82938c77c89 100644 --- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c +++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c @@ -490,11 +490,12 @@ cnxk_sess_fill(struct roc_cpt *roc_cpt, struct rte_crypto_sym_xform *xform, struct rte_crypto_sym_xform *aead_xfrm = NULL; struct rte_crypto_sym_xform *c_xfrm = NULL; struct rte_crypto_sym_xform *a_xfrm = NULL; - bool pdcp_chain_supported = false; bool ciph_then_auth = false; - if (roc_cpt->hw_caps[CPT_ENG_TYPE_SE].pdcp_chain) - pdcp_chain_supported = true; + if (roc_cpt->hw_caps[CPT_ENG_TYPE_SE].pdcp_chain_zuc256) + sess->roc_se_ctx.pdcp_iv_offset = 24; + else + sess->roc_se_ctx.pdcp_iv_offset = 16; if (xform == NULL) return -EINVAL; @@ -591,8 +592,7 @@ cnxk_sess_fill(struct roc_cpt *roc_cpt, struct rte_crypto_sym_xform *xform, case RTE_CRYPTO_AUTH_SNOW3G_UIA2: case RTE_CRYPTO_AUTH_ZUC_EIA3: case RTE_CRYPTO_AUTH_AES_CMAC: - if (!pdcp_chain_supported || - !is_valid_pdcp_cipher_alg(c_xfrm, sess)) + if (!is_valid_pdcp_cipher_alg(c_xfrm, sess)) return -ENOTSUP; break; default: @@ -627,8 +627,7 @@ cnxk_sess_fill(struct roc_cpt *roc_cpt, struct rte_crypto_sym_xform *xform, case RTE_CRYPTO_AUTH_SNOW3G_UIA2: case RTE_CRYPTO_AUTH_ZUC_EIA3: case RTE_CRYPTO_AUTH_AES_CMAC: - if (!pdcp_chain_supported || - !is_valid_pdcp_cipher_alg(c_xfrm, sess)) + if (!is_valid_pdcp_cipher_alg(c_xfrm, sess)) return -ENOTSUP; break; default: diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h index 6ee4cbda703..c6bb8023eac 100644 --- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h +++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h @@ -16,8 +16,9 @@ #include "roc_errata.h" #include "roc_se.h" -#define CNXK_CPT_MIN_HEADROOM_REQ 32 -#define CNXK_CPT_MIN_TAILROOM_REQ 102 +/* Space for ctrl_word(8B), IV(48B), passthrough alignment(8B) */ +#define CNXK_CPT_MIN_HEADROOM_REQ 64 +#define CNXK_CPT_MIN_TAILROOM_REQ 102 /* Default command timeout in seconds */ #define DEFAULT_COMMAND_TIMEOUT 4 @@ -44,6 +45,7 @@ struct cpt_qp_meta_info { struct cpt_inflight_req { union cpt_res_s res; union { + void *opaque; struct rte_crypto_op *cop; struct rte_event_vector *vec; }; diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h index 75c1dce231b..c2a807fa942 100644 --- a/drivers/crypto/cnxk/cnxk_se.h +++ b/drivers/crypto/cnxk/cnxk_se.h @@ -31,30 +31,31 @@ enum cpt_dp_thread_type { struct cnxk_se_sess { struct rte_cryptodev_sym_session rte_sess; - uint16_t cpt_op : 4; - uint16_t zsk_flag : 4; - uint16_t aes_gcm : 1; - uint16_t aes_ccm : 1; - uint16_t aes_ctr : 1; - uint16_t chacha_poly : 1; - uint16_t is_null : 1; - uint16_t is_gmac : 1; - uint16_t chained_op : 1; - uint16_t auth_first : 1; - uint16_t aes_ctr_eea2 : 1; - uint16_t zs_cipher : 4; - uint16_t zs_auth : 4; - uint16_t dp_thr_type : 8; - uint16_t aad_length; + uint8_t aes_gcm : 1; + uint8_t aes_ccm : 1; + uint8_t aes_ctr : 1; + uint8_t chacha_poly : 1; + uint8_t is_null : 1; + uint8_t is_gmac : 1; + uint8_t chained_op : 1; + uint8_t auth_first : 1; + uint8_t aes_ctr_eea2 : 1; uint8_t is_sha3 : 1; uint8_t short_iv : 1; uint8_t is_sm3 : 1; uint8_t passthrough : 1; uint8_t is_sm4 : 1; - uint8_t rsvd : 3; + uint8_t cipher_only : 1; + uint8_t rsvd : 1; + uint8_t cpt_op : 4; + uint8_t zsk_flag : 4; + uint8_t zs_cipher : 4; + uint8_t zs_auth : 4; + uint8_t dp_thr_type; uint8_t mac_len; uint8_t iv_length; uint8_t auth_iv_length; + uint16_t aad_length; uint16_t 
iv_offset; uint16_t auth_iv_offset; uint32_t salt; @@ -65,6 +66,23 @@ struct cnxk_se_sess { struct roc_cpt_lf *lf; } __rte_aligned(ROC_ALIGN); +struct cnxk_sym_dp_ctx { + struct cnxk_se_sess *sess; +}; + +struct cnxk_iov { + char src[SRC_IOV_SIZE]; + char dst[SRC_IOV_SIZE]; + void *iv_buf; + void *aad_buf; + void *mac_buf; + uint16_t c_head; + uint16_t c_tail; + uint16_t a_head; + uint16_t a_tail; + int data_len; +}; + static __rte_always_inline int fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess); @@ -84,10 +102,10 @@ cpt_pack_iv(uint8_t *iv_src, uint8_t *iv_dst) } static inline void -pdcp_iv_copy(uint8_t *iv_d, uint8_t *iv_s, const uint8_t pdcp_alg_type, - uint8_t pack_iv) +pdcp_iv_copy(uint8_t *iv_d, const uint8_t *iv_s, const uint8_t pdcp_alg_type, uint8_t pack_iv) { - uint32_t *iv_s_temp, iv_temp[4]; + const uint32_t *iv_s_temp; + uint32_t iv_temp[4]; int j; if (unlikely(iv_s == NULL)) { @@ -101,18 +119,37 @@ pdcp_iv_copy(uint8_t *iv_d, uint8_t *iv_s, const uint8_t pdcp_alg_type, * and BigEndian, MC needs it as IV0 IV1 IV2 IV3 */ - iv_s_temp = (uint32_t *)iv_s; + iv_s_temp = (const uint32_t *)iv_s; for (j = 0; j < 4; j++) iv_temp[j] = iv_s_temp[3 - j]; memcpy(iv_d, iv_temp, 16); } else if ((pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_ZUC) || pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_AES_CTR) { + memcpy(iv_d, iv_s, 16); if (pack_iv) { - cpt_pack_iv(iv_s, iv_d); - memcpy(iv_d + 6, iv_s + 8, 17); - } else - memcpy(iv_d, iv_s, 16); + uint8_t iv_d23, iv_d24; + + /* Save last two bytes as only 23B IV space is available */ + iv_d23 = iv_d[23]; + iv_d24 = iv_d[24]; + + /* Copy remaining part of IV */ + memcpy(iv_d + 16, iv_s + 16, 25 - 16); + + /* Swap IV */ + roc_se_zuc_bytes_swap(iv_d, 25); + + /* Pack IV */ + cpt_pack_iv(iv_d, iv_d); + + /* Move IV */ + for (j = 6; j < 23; j++) + iv_d[j] = iv_d[j + 2]; + + iv_d[23] = iv_d23; + iv_d[24] = iv_d24; + } } } @@ -221,9 +258,9 @@ cpt_mac_len_verify(struct rte_crypto_auth_xform *auth) static __rte_always_inline int sg_inst_prep(struct roc_se_fc_params *params, struct cpt_inst_s *inst, uint64_t offset_ctrl, - uint8_t *iv_s, int iv_len, uint8_t pack_iv, uint8_t pdcp_alg_type, int32_t inputlen, - int32_t outputlen, uint32_t passthrough_len, uint32_t req_flags, int pdcp_flag, - int decrypt) + const uint8_t *iv_s, int iv_len, uint8_t pack_iv, uint8_t pdcp_alg_type, + int32_t inputlen, int32_t outputlen, uint32_t passthrough_len, uint32_t req_flags, + int pdcp_flag, int decrypt) { struct roc_sglist_comp *gather_comp, *scatter_comp; void *m_vaddr = params->meta_buf.vaddr; @@ -412,9 +449,9 @@ sg_inst_prep(struct roc_se_fc_params *params, struct cpt_inst_s *inst, uint64_t static __rte_always_inline int sg2_inst_prep(struct roc_se_fc_params *params, struct cpt_inst_s *inst, uint64_t offset_ctrl, - uint8_t *iv_s, int iv_len, uint8_t pack_iv, uint8_t pdcp_alg_type, int32_t inputlen, - int32_t outputlen, uint32_t passthrough_len, uint32_t req_flags, int pdcp_flag, - int decrypt) + const uint8_t *iv_s, int iv_len, uint8_t pack_iv, uint8_t pdcp_alg_type, + int32_t inputlen, int32_t outputlen, uint32_t passthrough_len, uint32_t req_flags, + int pdcp_flag, int decrypt) { struct roc_sg2list_comp *gather_comp, *scatter_comp; void *m_vaddr = params->meta_buf.vaddr; @@ -831,9 +868,9 @@ cpt_digest_gen_sg_ver2_prep(uint32_t flags, uint64_t d_lens, struct roc_se_fc_pa static inline int pdcp_chain_sg1_prep(struct roc_se_fc_params *params, struct roc_se_ctx *cpt_ctx, struct cpt_inst_s *inst, union cpt_inst_w4 w4, int32_t inputlen, - uint8_t hdr_len, 
uint64_t offset_ctrl, uint32_t req_flags, uint8_t *cipher_iv, - uint8_t *auth_iv, const int pack_iv, const uint8_t pdcp_ci_alg, - const uint8_t pdcp_auth_alg) + uint8_t hdr_len, uint64_t offset_ctrl, uint32_t req_flags, + const uint8_t *cipher_iv, const uint8_t *auth_iv, const int pack_iv, + const uint8_t pdcp_ci_alg, const uint8_t pdcp_auth_alg) { struct roc_sglist_comp *scatter_comp, *gather_comp; void *m_vaddr = params->meta_buf.vaddr; @@ -873,7 +910,7 @@ pdcp_chain_sg1_prep(struct roc_se_fc_params *params, struct roc_se_ctx *cpt_ctx, pdcp_iv_copy(iv_d, cipher_iv, pdcp_ci_alg, pack_iv); /* Auth IV */ - iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN + 16); + iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN + params->pdcp_iv_offset); pdcp_iv_copy(iv_d, auth_iv, pdcp_auth_alg, pack_iv); /* input data */ @@ -940,9 +977,9 @@ pdcp_chain_sg1_prep(struct roc_se_fc_params *params, struct roc_se_ctx *cpt_ctx, static inline int pdcp_chain_sg2_prep(struct roc_se_fc_params *params, struct roc_se_ctx *cpt_ctx, struct cpt_inst_s *inst, union cpt_inst_w4 w4, int32_t inputlen, - uint8_t hdr_len, uint64_t offset_ctrl, uint32_t req_flags, uint8_t *cipher_iv, - uint8_t *auth_iv, const int pack_iv, const uint8_t pdcp_ci_alg, - const uint8_t pdcp_auth_alg) + uint8_t hdr_len, uint64_t offset_ctrl, uint32_t req_flags, + const uint8_t *cipher_iv, const uint8_t *auth_iv, const int pack_iv, + const uint8_t pdcp_ci_alg, const uint8_t pdcp_auth_alg) { struct roc_sg2list_comp *gather_comp, *scatter_comp; void *m_vaddr = params->meta_buf.vaddr; @@ -979,7 +1016,7 @@ pdcp_chain_sg2_prep(struct roc_se_fc_params *params, struct roc_se_ctx *cpt_ctx, pdcp_iv_copy(iv_d, cipher_iv, pdcp_ci_alg, pack_iv); /* Auth IV */ - iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN + 16); + iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN + params->pdcp_iv_offset); pdcp_iv_copy(iv_d, auth_iv, pdcp_auth_alg, pack_iv); /* input data */ @@ -1051,12 +1088,12 @@ cpt_sm_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens, struct roc_se_fc_p int32_t inputlen, outputlen, enc_dlen; union cpt_inst_w4 cpt_inst_w4; uint32_t passthrough_len = 0; + const uint8_t *src = NULL; struct roc_se_ctx *se_ctx; uint32_t encr_data_len; uint32_t encr_offset; uint64_t offset_ctrl; uint8_t iv_len = 16; - uint8_t *src = NULL; void *offset_vaddr; int ret; @@ -1070,7 +1107,6 @@ cpt_sm_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens, struct roc_se_fc_p iv_len = 0; encr_offset += iv_len; - enc_dlen = encr_data_len + encr_offset; enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset; inputlen = enc_dlen; @@ -1110,7 +1146,7 @@ cpt_sm_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens, struct roc_se_fc_p if (likely(iv_len)) { void *dst = PLT_PTR_ADD(offset_vaddr, ROC_SE_OFF_CTRL_LEN); - uint64_t *src = fc_params->iv_buf; + const uint64_t *src = fc_params->iv_buf; rte_memcpy(dst, src, 16); } @@ -1143,20 +1179,19 @@ cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens, struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst, const bool is_sg_ver2) { - uint32_t iv_offset = 0; + uint32_t encr_data_len, auth_data_len, aad_len = 0; + uint32_t encr_offset, auth_offset, iv_offset = 0; int32_t inputlen, outputlen, enc_dlen, auth_dlen; - struct roc_se_ctx *se_ctx; uint32_t cipher_type, hash_type; - uint32_t mac_len; - uint8_t iv_len = 16; - uint32_t encr_offset, auth_offset; - uint64_t offset_ctrl; - uint32_t encr_data_len, auth_data_len, aad_len = 0; - uint32_t passthrough_len = 0; union cpt_inst_w4 cpt_inst_w4; + uint32_t 
passthrough_len = 0; + const uint8_t *src = NULL; + struct roc_se_ctx *se_ctx; + uint64_t offset_ctrl; + uint8_t iv_len = 16; void *offset_vaddr; uint8_t op_minor; - uint8_t *src = NULL; + uint32_t mac_len; int ret; encr_offset = ROC_SE_ENCR_OFFSET(d_offs); @@ -1280,7 +1315,7 @@ cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens, if (likely(iv_len)) { uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN); - uint64_t *src = fc_params->iv_buf; + const uint64_t *src = fc_params->iv_buf; dest[0] = src[0]; dest[1] = src[1]; } @@ -1313,19 +1348,18 @@ cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens, struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst, const bool is_sg_ver2) { - uint32_t iv_offset = 0; + uint32_t encr_data_len, auth_data_len, aad_len = 0; + uint32_t encr_offset, auth_offset, iv_offset = 0; int32_t inputlen, outputlen, enc_dlen, auth_dlen; - struct roc_se_ctx *se_ctx; + union cpt_inst_w4 cpt_inst_w4; + uint32_t passthrough_len = 0; int32_t hash_type, mac_len; + const uint8_t *src = NULL; + struct roc_se_ctx *se_ctx; + uint64_t offset_ctrl; uint8_t iv_len = 16; - uint32_t encr_offset, auth_offset; - uint32_t encr_data_len, auth_data_len, aad_len = 0; - uint32_t passthrough_len = 0; - union cpt_inst_w4 cpt_inst_w4; void *offset_vaddr; uint8_t op_minor; - uint64_t offset_ctrl; - uint8_t *src = NULL; int ret; encr_offset = ROC_SE_ENCR_OFFSET(d_offs); @@ -1438,7 +1472,7 @@ cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens, if (likely(iv_len)) { uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN); - uint64_t *src = fc_params->iv_buf; + const uint64_t *src = fc_params->iv_buf; dest[0] = src[0]; dest[1] = src[1]; } @@ -1473,12 +1507,13 @@ cpt_pdcp_chain_alg_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens, { uint32_t encr_data_len, auth_data_len, aad_len, passthr_len, pad_len, hdr_len; uint32_t encr_offset, auth_offset, iv_offset = 0; - uint8_t *auth_iv = NULL, *cipher_iv = NULL; + const uint8_t *auth_iv = NULL, *cipher_iv = NULL; + uint8_t pdcp_iv_off = params->pdcp_iv_offset; + const int iv_len = pdcp_iv_off * 2; uint8_t pdcp_ci_alg, pdcp_auth_alg; union cpt_inst_w4 cpt_inst_w4; struct roc_se_ctx *se_ctx; uint64_t *offset_vaddr; - const int iv_len = 32; uint64_t offset_ctrl; uint8_t pack_iv = 0; int32_t inputlen; @@ -1560,7 +1595,7 @@ cpt_pdcp_chain_alg_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens, iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN); pdcp_iv_copy(iv_d, cipher_iv, pdcp_ci_alg, pack_iv); - iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN + 16); + iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN + pdcp_iv_off); pdcp_iv_copy(iv_d, auth_iv, pdcp_auth_alg, pack_iv); inst->w4.u64 = cpt_inst_w4.u64; @@ -1582,18 +1617,18 @@ static __rte_always_inline int cpt_pdcp_alg_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens, struct roc_se_fc_params *params, struct cpt_inst_s *inst, const bool is_sg_ver2) { + uint32_t encr_data_len, auth_data_len; + uint32_t encr_offset, auth_offset; + union cpt_inst_w4 cpt_inst_w4; int32_t inputlen, outputlen; struct roc_se_ctx *se_ctx; - uint32_t mac_len = 0; - uint8_t pdcp_alg_type; - uint32_t encr_offset, auth_offset; - uint32_t encr_data_len, auth_data_len; - int flags, iv_len; - uint64_t offset_ctrl; uint64_t *offset_vaddr; - uint8_t *iv_s; + uint8_t pdcp_alg_type; + uint32_t mac_len = 0; + const uint8_t *iv_s; uint8_t pack_iv = 0; - union cpt_inst_w4 cpt_inst_w4; + uint64_t offset_ctrl; + int flags, 
iv_len; int ret; se_ctx = params->ctx; @@ -1618,7 +1653,6 @@ cpt_pdcp_alg_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens, iv_len = params->auth_iv_len; if (iv_len == 25) { - roc_se_zuc_bytes_swap(iv_s, iv_len); iv_len -= 2; pack_iv = 1; } @@ -1654,7 +1688,6 @@ cpt_pdcp_alg_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens, pdcp_alg_type = se_ctx->pdcp_ci_alg; if (iv_len == 25) { - roc_se_zuc_bytes_swap(iv_s, iv_len); iv_len -= 2; pack_iv = 1; } @@ -1740,16 +1773,16 @@ static __rte_always_inline int cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens, struct roc_se_fc_params *params, struct cpt_inst_s *inst, const bool is_sg_ver2) { + uint32_t encr_data_len, auth_data_len; int32_t inputlen = 0, outputlen = 0; + uint32_t encr_offset, auth_offset; + const uint8_t *iv_s, iv_len = 8; + union cpt_inst_w4 cpt_inst_w4; struct roc_se_ctx *se_ctx; + uint64_t offset_ctrl; uint32_t mac_len = 0; - uint32_t encr_offset, auth_offset; - uint32_t encr_data_len, auth_data_len; - int flags; - uint8_t *iv_s, iv_len = 8; uint8_t dir = 0; - uint64_t offset_ctrl; - union cpt_inst_w4 cpt_inst_w4; + int flags; encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8; auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8; @@ -1757,17 +1790,15 @@ cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens, auth_data_len = ROC_SE_AUTH_DLEN(d_lens); se_ctx = params->ctx; - iv_s = params->iv_buf; flags = se_ctx->zsk_flags; mac_len = se_ctx->mac_len; - dir = iv_s[8] & 0x1; cpt_inst_w4.u64 = se_ctx->template_w4.u64; if (flags == 0x0) { + iv_s = params->iv_buf; /* Consider IV len */ encr_offset += iv_len; - auth_offset += iv_len; inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8); outputlen = inputlen; @@ -1779,6 +1810,9 @@ cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens, return -1; } } else { + iv_s = params->auth_iv_buf; + dir = iv_s[8] & 0x1; + inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8); outputlen = mac_len; /* iv offset is 0 */ @@ -1800,6 +1834,10 @@ cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens, cpt_inst_w4.s.param2 = auth_data_len; inst->w4.u64 = cpt_inst_w4.u64; + + if (unlikely(iv_s == NULL)) + return -1; + if (is_sg_ver2) sg2_inst_prep(params, inst, offset_ctrl, iv_s, iv_len, 0, 0, inputlen, outputlen, 0, req_flags, 0, 0); @@ -1856,6 +1894,10 @@ cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens, struct roc_se_fc_params *p } inst->w4.u64 = cpt_inst_w4.u64; + + if (unlikely(params->iv_buf == NULL)) + return -1; + if (is_sg_ver2) sg2_inst_prep(params, inst, offset_ctrl, params->iv_buf, iv_len, 0, 0, inputlen, outputlen, 0, 0, 0, 1); @@ -2176,6 +2218,7 @@ fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess) } } + sess->cipher_only = 1; sess->zsk_flag = zsk_flag; sess->zs_cipher = zs_cipher; sess->aes_gcm = 0; @@ -2543,11 +2586,6 @@ fill_sm_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess, char src[SRC_IOV_SIZE]; char dst[SRC_IOV_SIZE]; void *mdata = NULL; -#ifdef CPT_ALWAYS_USE_SG_MODE - uint8_t inplace = 0; -#else - uint8_t inplace = 1; -#endif uint32_t flags = 0; int ret; @@ -2577,11 +2615,9 @@ fill_sm_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess, fc_params.ctx = &sess->roc_se_ctx; - if (likely(!m_dst && inplace)) { + if (m_dst == NULL) { fc_params.dst_iov = fc_params.src_iov = (void *)src; - prepare_iov_from_pkt_inplace(m_src, &fc_params, &flags); - } else { /* Out of place processing */ fc_params.src_iov = (void *)src; @@ -2594,14 +2630,10 @@ 
fill_sm_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess, goto err_exit; } - if (unlikely(m_dst != NULL)) { - if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) { - plt_dp_err("Prepare dst iov failed for m_dst %p", m_dst); - ret = -EINVAL; - goto err_exit; - } - } else { - fc_params.dst_iov = (void *)src; + if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) { + plt_dp_err("Prepare dst iov failed for m_dst %p", m_dst); + ret = -EINVAL; + goto err_exit; } } @@ -2877,6 +2909,7 @@ fill_passthrough_params(struct rte_crypto_op *cop, struct cpt_inst_s *inst) inst->w0.u64 = 0; inst->w5.u64 = 0; + inst->w6.u64 = 0; inst->w4.u64 = w4.u64; return 0; @@ -2904,6 +2937,7 @@ fill_pdcp_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess, fc_params.auth_iv_len = 0; fc_params.iv_buf = NULL; fc_params.auth_iv_buf = NULL; + fc_params.pdcp_iv_offset = sess->roc_se_ctx.pdcp_iv_offset; if (likely(sess->iv_length)) fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *, sess->iv_offset); @@ -2990,6 +3024,7 @@ fill_pdcp_chain_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess, fc_params.auth_iv_len = sess->auth_iv_length; fc_params.iv_buf = NULL; fc_params.auth_iv_buf = NULL; + fc_params.pdcp_iv_offset = sess->roc_se_ctx.pdcp_iv_offset; m_src = sym_op->m_src; m_dst = sym_op->m_dst; @@ -3192,8 +3227,9 @@ fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess, d_offs = auth_range_off; auth_range_off = 0; params.auth_iv_len = sess->auth_iv_length; - params.auth_iv_buf = rte_crypto_op_ctod_offset( - cop, uint8_t *, sess->auth_iv_offset); + params.auth_iv_buf = + rte_crypto_op_ctod_offset(cop, uint8_t *, sess->auth_iv_offset); + params.pdcp_iv_offset = sess->roc_se_ctx.pdcp_iv_offset; if (sess->zsk_flag == ROC_SE_K_F9) { uint32_t length_in_bits, num_bytes; uint8_t *src, direction = 0; @@ -3217,7 +3253,7 @@ fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess, /* Store it at end of auth iv */ iv_buf[8] = direction; - params.iv_buf = iv_buf; + params.auth_iv_buf = iv_buf; } } @@ -3299,9 +3335,19 @@ static __rte_always_inline int __rte_hot cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op, struct cnxk_se_sess *sess, struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst, const bool is_sg_ver2) { + enum cpt_dp_thread_type dp_thr_type; int ret; - switch (sess->dp_thr_type) { + dp_thr_type = sess->dp_thr_type; + + /* + * With cipher only, microcode expects that cipher length is non-zero. To accept such + * instructions, send to CPT as passthrough. 
+ */ + if (unlikely(sess->cipher_only && op->sym->cipher.data.length == 0)) + dp_thr_type = CPT_DP_THREAD_TYPE_PT; + + switch (dp_thr_type) { case CPT_DP_THREAD_TYPE_PT: ret = fill_passthrough_params(op, inst); break; @@ -3337,4 +3383,289 @@ cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op, struct cnxk_ return ret; } +static __rte_always_inline uint32_t +prepare_iov_from_raw_vec(struct rte_crypto_vec *vec, struct roc_se_iov_ptr *iovec, uint32_t num) +{ + uint32_t i, total_len = 0; + + for (i = 0; i < num; i++) { + iovec->bufs[i].vaddr = vec[i].base; + iovec->bufs[i].size = vec[i].len; + + total_len += vec[i].len; + } + + iovec->buf_cnt = i; + return total_len; +} + +static __rte_always_inline void +cnxk_raw_burst_to_iov(struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs *ofs, int index, + struct cnxk_iov *iov) +{ + iov->iv_buf = vec->iv[index].va; + iov->aad_buf = vec->aad[index].va; + iov->mac_buf = vec->digest[index].va; + + iov->data_len = + prepare_iov_from_raw_vec(vec->src_sgl[index].vec, (struct roc_se_iov_ptr *)iov->src, + vec->src_sgl[index].num); + + if (vec->dest_sgl == NULL) + prepare_iov_from_raw_vec(vec->src_sgl[index].vec, (struct roc_se_iov_ptr *)iov->dst, + vec->src_sgl[index].num); + else + prepare_iov_from_raw_vec(vec->dest_sgl[index].vec, + (struct roc_se_iov_ptr *)iov->dst, + vec->dest_sgl[index].num); + + iov->c_head = ofs->ofs.cipher.head; + iov->c_tail = ofs->ofs.cipher.tail; + + iov->a_head = ofs->ofs.auth.head; + iov->a_tail = ofs->ofs.auth.tail; +} + +static __rte_always_inline void +cnxk_raw_to_iov(struct rte_crypto_vec *data_vec, uint16_t n_vecs, union rte_crypto_sym_ofs *ofs, + struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad, struct cnxk_iov *iov) +{ + iov->iv_buf = iv->va; + iov->aad_buf = aad->va; + iov->mac_buf = digest->va; + + iov->data_len = + prepare_iov_from_raw_vec(data_vec, (struct roc_se_iov_ptr *)iov->src, n_vecs); + prepare_iov_from_raw_vec(data_vec, (struct roc_se_iov_ptr *)iov->dst, n_vecs); + + iov->c_head = ofs->ofs.cipher.head; + iov->c_tail = ofs->ofs.cipher.tail; + + iov->a_head = ofs->ofs.auth.head; + iov->a_tail = ofs->ofs.auth.tail; +} + +static inline void +raw_memcpy(struct cnxk_iov *iov) +{ + struct roc_se_iov_ptr *src = (struct roc_se_iov_ptr *)iov->src; + struct roc_se_iov_ptr *dst = (struct roc_se_iov_ptr *)iov->dst; + int num = src->buf_cnt; + int i; + + /* skip copy in case of inplace */ + if (dst->bufs[0].vaddr == src->bufs[0].vaddr) + return; + + for (i = 0; i < num; i++) { + rte_memcpy(dst->bufs[i].vaddr, src->bufs[i].vaddr, src->bufs[i].size); + dst->bufs[i].size = src->bufs[i].size; + } +} + +static inline int +fill_raw_passthrough_params(struct cnxk_iov *iov, struct cpt_inst_s *inst) +{ + const union cpt_inst_w4 w4 = { + .s.opcode_major = ROC_SE_MAJOR_OP_MISC, + .s.opcode_minor = ROC_SE_MISC_MINOR_OP_PASSTHROUGH, + .s.param1 = 1, + .s.param2 = 1, + .s.dlen = 0, + }; + + inst->w0.u64 = 0; + inst->w5.u64 = 0; + inst->w4.u64 = w4.u64; + + raw_memcpy(iov); + + return 0; +} + +static __rte_always_inline int +fill_raw_fc_params(struct cnxk_iov *iov, struct cnxk_se_sess *sess, struct cpt_qp_meta_info *m_info, + struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst, const bool is_kasumi, + const bool is_aead, const bool is_sg_ver2) +{ + uint32_t cipher_len, auth_len = 0; + struct roc_se_fc_params fc_params; + uint8_t cpt_op = sess->cpt_op; + uint64_t d_offs, d_lens; + uint8_t ccm_iv_buf[16]; + uint32_t flags = 0; + void *mdata = NULL; + 
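cnxk_raw_burst_to_iov()/cnxk_raw_to_iov() above translate the generic raw-API descriptors into the driver's cnxk_iov. As a reference point, a minimal sketch of what a caller hands to rte_cryptodev_raw_enqueue_burst() for a single in-place, single-segment op; buffer ownership and completion polling are left out and all buffer pointers are placeholders.

#include <rte_crypto_sym.h>
#include <rte_cryptodev.h>

/* One source SGL, no dest SGL (in-place), and per-op IV/digest/AAD
 * pointers, exactly the fields cnxk_raw_burst_to_iov() consumes. */
static uint32_t
enqueue_one_raw_op(struct rte_crypto_raw_dp_ctx *ctx, void *buf,
		   rte_iova_t buf_iova, uint32_t len, void *iv, void *digest,
		   void *user_data)
{
	struct rte_crypto_vec data_vec = { .base = buf, .iova = buf_iova, .len = len };
	struct rte_crypto_sgl sgl = { .vec = &data_vec, .num = 1 };
	struct rte_crypto_va_iova_ptr iv_ptr = { .va = iv };
	struct rte_crypto_va_iova_ptr digest_ptr = { .va = digest };
	struct rte_crypto_va_iova_ptr aad_ptr = { 0 };
	int32_t status = 0;
	struct rte_crypto_sym_vec vec = {
		.num = 1,
		.src_sgl = &sgl,
		.dest_sgl = NULL,	/* in-place: driver reuses the source SGL */
		.iv = &iv_ptr,
		.digest = &digest_ptr,
		.aad = &aad_ptr,
		.status = &status,
	};
	union rte_crypto_sym_ofs ofs = { .raw = 0 };	/* no head/tail skip */
	int enq_status;
	uint32_t n;

	n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, &user_data, &enq_status);
	if (n > 0)
		rte_cryptodev_raw_enqueue_done(ctx, n);
	return n;
}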
uint32_t iv_buf[4]; + int ret; + + fc_params.cipher_iv_len = sess->iv_length; + fc_params.ctx = &sess->roc_se_ctx; + fc_params.auth_iv_buf = NULL; + fc_params.auth_iv_len = 0; + fc_params.mac_buf.size = 0; + fc_params.mac_buf.vaddr = 0; + fc_params.iv_buf = NULL; + + if (likely(sess->iv_length)) { + flags |= ROC_SE_VALID_IV_BUF; + + if (sess->is_gmac) { + fc_params.iv_buf = iov->aad_buf; + if (sess->short_iv) { + memcpy((void *)iv_buf, iov->aad_buf, 12); + iv_buf[3] = rte_cpu_to_be_32(0x1); + fc_params.iv_buf = iv_buf; + } + } else { + fc_params.iv_buf = iov->iv_buf; + if (sess->short_iv) { + memcpy((void *)iv_buf, iov->iv_buf, 12); + iv_buf[3] = rte_cpu_to_be_32(0x1); + fc_params.iv_buf = iv_buf; + } + } + + if (sess->aes_ccm) { + memcpy((uint8_t *)ccm_iv_buf, iov->iv_buf, sess->iv_length + 1); + ccm_iv_buf[0] = 14 - sess->iv_length; + fc_params.iv_buf = ccm_iv_buf; + } + } + + fc_params.src_iov = (void *)iov->src; + fc_params.dst_iov = (void *)iov->dst; + + cipher_len = iov->data_len - iov->c_head - iov->c_tail; + auth_len = iov->data_len - iov->a_head - iov->a_tail; + + d_offs = (iov->c_head << 16) | iov->a_head; + d_lens = ((uint64_t)cipher_len << 32) | auth_len; + + if (is_aead) { + uint16_t aad_len = sess->aad_length; + + if (likely(aad_len == 0)) { + d_offs = (iov->c_head << 16) | iov->c_head; + d_lens = ((uint64_t)cipher_len << 32) | cipher_len; + } else { + flags |= ROC_SE_VALID_AAD_BUF; + fc_params.aad_buf.size = sess->aad_length; + /* For AES CCM, AAD is written 18B after aad.data as per API */ + if (sess->aes_ccm) + fc_params.aad_buf.vaddr = PLT_PTR_ADD((uint8_t *)iov->aad_buf, 18); + else + fc_params.aad_buf.vaddr = iov->aad_buf; + + d_offs = (iov->c_head << 16); + d_lens = ((uint64_t)cipher_len << 32); + } + } + + if (likely(sess->mac_len)) { + flags |= ROC_SE_VALID_MAC_BUF; + fc_params.mac_buf.size = sess->mac_len; + fc_params.mac_buf.vaddr = iov->mac_buf; + } + + fc_params.meta_buf.vaddr = NULL; + mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen, m_info->pool, infl_req); + if (mdata == NULL) { + plt_dp_err("Error allocating meta buffer for request"); + return -ENOMEM; + } + + if (is_kasumi) { + if (cpt_op & ROC_SE_OP_ENCODE) + ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, &fc_params, inst, + is_sg_ver2); + else + ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, &fc_params, inst, + is_sg_ver2); + } else { + if (cpt_op & ROC_SE_OP_ENCODE) + ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, &fc_params, inst, + is_sg_ver2); + else + ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, &fc_params, inst, + is_sg_ver2); + } + + if (unlikely(ret)) { + plt_dp_err("Preparing request failed due to bad input arg"); + goto free_mdata_and_exit; + } + + return 0; + +free_mdata_and_exit: + rte_mempool_put(m_info->pool, infl_req->mdata); + return ret; +} + +static __rte_always_inline int +fill_raw_digest_params(struct cnxk_iov *iov, struct cnxk_se_sess *sess, + struct cpt_qp_meta_info *m_info, struct cpt_inflight_req *infl_req, + struct cpt_inst_s *inst, const bool is_sg_ver2) +{ + uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK; + struct roc_se_fc_params fc_params; + uint16_t mac_len = sess->mac_len; + uint64_t d_offs, d_lens; + uint32_t auth_len = 0; + uint32_t flags = 0; + void *mdata = NULL; + uint32_t space = 0; + int ret; + + memset(&fc_params, 0, sizeof(struct roc_se_fc_params)); + fc_params.cipher_iv_len = sess->iv_length; + fc_params.ctx = &sess->roc_se_ctx; + + mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen, m_info->pool, infl_req); + if (mdata == NULL) { + 
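fill_raw_fc_params() below packs the cipher/auth head offsets into d_offs and the two data lengths into d_lens before handing them to the prep helpers. A standalone illustration of that packing and the corresponding decode, with example values only:

#include <stdint.h>
#include <assert.h>

/* Cipher offset/length live in the upper halves, auth offset/length in the
 * lower halves; the decode side mirrors what the ROC_SE_*_OFFSET/_DLEN
 * helpers extract on the prep side. */
static void
pack_offsets_demo(void)
{
	uint16_t c_head = 16, a_head = 0;          /* example head offsets */
	uint32_t cipher_len = 64, auth_len = 80;   /* example data lengths */

	uint64_t d_offs = ((uint64_t)c_head << 16) | a_head;
	uint64_t d_lens = ((uint64_t)cipher_len << 32) | auth_len;

	assert(((d_offs >> 16) & 0xffff) == c_head);   /* encr offset */
	assert((d_offs & 0xffff) == a_head);           /* auth offset */
	assert((d_lens >> 32) == cipher_len);          /* encr dlen */
	assert((d_lens & 0xffffffff) == auth_len);     /* auth dlen */
}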
plt_dp_err("Error allocating meta buffer for request"); + ret = -ENOMEM; + goto err_exit; + } + + flags |= ROC_SE_VALID_MAC_BUF; + fc_params.src_iov = (void *)iov->src; + auth_len = iov->data_len - iov->a_head - iov->a_tail; + d_lens = auth_len; + d_offs = iov->a_head; + + if (auth_op == ROC_SE_OP_AUTH_GENERATE) { + fc_params.mac_buf.size = sess->mac_len; + fc_params.mac_buf.vaddr = iov->mac_buf; + } else { + uint64_t *op = mdata; + + /* Need space for storing generated mac */ + space += 2 * sizeof(uint64_t); + + fc_params.mac_buf.vaddr = (uint8_t *)mdata + space; + fc_params.mac_buf.size = mac_len; + space += RTE_ALIGN_CEIL(mac_len, 8); + op[0] = (uintptr_t)iov->mac_buf; + op[1] = mac_len; + infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY; + } + + fc_params.meta_buf.vaddr = (uint8_t *)mdata + space; + fc_params.meta_buf.size -= space; + + ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params, inst, is_sg_ver2); + if (ret) + goto free_mdata_and_exit; + + return 0; + +free_mdata_and_exit: + if (infl_req->op_flags & CPT_OP_FLAGS_METABUF) + rte_mempool_put(m_info->pool, infl_req->mdata); +err_exit: + return ret; +} + #endif /*_CNXK_SE_H_ */ diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c index 5ccfcbd7a60..bb5a2c629e5 100644 --- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. - * Copyright 2016-2022 NXP + * Copyright 2016-2023 NXP * */ @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -138,16 +139,14 @@ build_proto_compound_sg_fd(dpaa2_sec_session *sess, DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); /* Configure Output SGE for Encap/Decap */ - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); /* o/p segs */ while (mbuf->next) { sge->length = mbuf->data_len; out_len += sge->length; sge++; mbuf = mbuf->next; - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); } /* using buf_len for last buf - so that extra data can be added */ sge->length = mbuf->buf_len - mbuf->data_off; @@ -165,8 +164,7 @@ build_proto_compound_sg_fd(dpaa2_sec_session *sess, DPAA2_SET_FLE_FIN(ip_fle); /* Configure input SGE for Encap/Decap */ - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); sge->length = mbuf->data_len; in_len += sge->length; @@ -174,8 +172,7 @@ build_proto_compound_sg_fd(dpaa2_sec_session *sess, /* i/p segs */ while (mbuf) { sge++; - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); sge->length = mbuf->data_len; in_len += sge->length; mbuf = mbuf->next; @@ -247,13 +244,11 @@ build_proto_compound_fd(dpaa2_sec_session *sess, DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); /* Configure Output FLE with dst mbuf data */ - DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf)); - DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off); + DPAA2_SET_FLE_ADDR(op_fle, rte_pktmbuf_iova(dst_mbuf)); DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len); /* Configure Input FLE with src mbuf data */ - DPAA2_SET_FLE_ADDR(ip_fle, 
DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf)); - DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off); + DPAA2_SET_FLE_ADDR(ip_fle, rte_pktmbuf_iova(src_mbuf)); DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len); DPAA2_SET_FD_LEN(fd, ip_fle->length); @@ -373,16 +368,14 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, sym_op->aead.data.length; /* Configure Output SGE for Encap/Decap */ - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->aead.data.offset); sge->length = mbuf->data_len - sym_op->aead.data.offset; mbuf = mbuf->next; /* o/p segs */ while (mbuf) { sge++; - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); sge->length = mbuf->data_len; mbuf = mbuf->next; } @@ -420,17 +413,14 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, sge++; } - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + - mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->aead.data.offset); sge->length = mbuf->data_len - sym_op->aead.data.offset; mbuf = mbuf->next; /* i/p segs */ while (mbuf) { sge++; - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); sge->length = mbuf->data_len; mbuf = mbuf->next; } @@ -535,8 +525,7 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess, DPAA2_SET_FLE_SG_EXT(fle); /* Configure Output SGE for Encap/Decap */ - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); - DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(dst) + sym_op->aead.data.offset); sge->length = sym_op->aead.data.length; if (sess->dir == DIR_ENC) { @@ -571,9 +560,7 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess, sge++; } - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); - DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + - sym_op->m_src->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + sym_op->aead.data.offset); sge->length = sym_op->aead.data.length; if (sess->dir == DIR_DEC) { sge++; @@ -666,16 +653,14 @@ build_authenc_sg_fd(dpaa2_sec_session *sess, sym_op->cipher.data.length; /* Configure Output SGE for Encap/Decap */ - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->auth.data.offset); sge->length = mbuf->data_len - sym_op->auth.data.offset; mbuf = mbuf->next; /* o/p segs */ while (mbuf) { sge++; - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); sge->length = mbuf->data_len; mbuf = mbuf->next; } @@ -706,17 +691,14 @@ build_authenc_sg_fd(dpaa2_sec_session *sess, sge->length = sess->iv.length; sge++; - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + - mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->auth.data.offset); sge->length = mbuf->data_len - sym_op->auth.data.offset; mbuf = mbuf->next; /* i/p segs */ while (mbuf) { sge++; - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, 
rte_pktmbuf_iova(mbuf)); sge->length = mbuf->data_len; mbuf = mbuf->next; } @@ -830,9 +812,7 @@ build_authenc_fd(dpaa2_sec_session *sess, DPAA2_SET_FLE_SG_EXT(fle); /* Configure Output SGE for Encap/Decap */ - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); - DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + - dst->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(dst) + sym_op->cipher.data.offset); sge->length = sym_op->cipher.data.length; if (sess->dir == DIR_ENC) { @@ -862,9 +842,7 @@ build_authenc_fd(dpaa2_sec_session *sess, sge->length = sess->iv.length; sge++; - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); - DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + - sym_op->m_src->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + sym_op->auth.data.offset); sge->length = sym_op->auth.data.length; if (sess->dir == DIR_DEC) { sge++; @@ -965,8 +943,7 @@ static inline int build_auth_sg_fd( sge++; } /* i/p 1st seg */ - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset); if (data_len <= (mbuf->data_len - data_offset)) { sge->length = data_len; @@ -978,8 +955,7 @@ static inline int build_auth_sg_fd( while ((data_len = data_len - sge->length) && (mbuf = mbuf->next)) { sge++; - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); if (data_len > mbuf->data_len) sge->length = mbuf->data_len; else @@ -1097,8 +1073,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, } /* Setting data to authenticate */ - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); - DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + data_offset); sge->length = data_len; if (sess->dir == DIR_DEC) { @@ -1183,16 +1158,14 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, DPAA2_SET_FLE_SG_EXT(op_fle); /* o/p 1st seg */ - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset); sge->length = mbuf->data_len - data_offset; mbuf = mbuf->next; /* o/p segs */ while (mbuf) { sge++; - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); sge->length = mbuf->data_len; mbuf = mbuf->next; } @@ -1212,22 +1185,19 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, /* i/p IV */ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = sess->iv.length; sge++; /* i/p 1st seg */ - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset); sge->length = mbuf->data_len - data_offset; mbuf = mbuf->next; /* i/p segs */ while (mbuf) { sge++; - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); - DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); sge->length = mbuf->data_len; mbuf = mbuf->next; } @@ -1328,8 +1298,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, sess->iv.length, sym_op->m_src->data_off); - DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst)); - 
DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off); + DPAA2_SET_FLE_ADDR(fle, rte_pktmbuf_iova(dst) + data_offset); fle->length = data_len + sess->iv.length; @@ -1349,8 +1318,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, sge->length = sess->iv.length; sge++; - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); - DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + data_offset); sge->length = data_len; DPAA2_SET_FLE_FIN(sge); @@ -1663,7 +1631,7 @@ dpaa2_sec_dump(struct rte_crypto_op *op) if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) sess = SECURITY_GET_SESS_PRIV(op->sym->session); #endif @@ -1690,7 +1658,7 @@ dpaa2_sec_dump(struct rte_crypto_op *op) sess->digest_length, sess->status, sess->ext_params.aead_ctxt.auth_only_len, sess->ext_params.aead_ctxt.auth_cipher_text); -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY printf("PDCP session params:\n" "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:" "\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n" @@ -3037,6 +3005,15 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96; authdata->algmode = OP_ALG_AAI_HMAC; break; + case RTE_CRYPTO_AUTH_SHA224_HMAC: + authdata->algmode = OP_ALG_AAI_HMAC; + if (session->digest_length == 6) + authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_96; + else if (session->digest_length == 14) + authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_224; + else + authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_112; + break; case RTE_CRYPTO_AUTH_SHA256_HMAC: authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; authdata->algmode = OP_ALG_AAI_HMAC; @@ -3064,7 +3041,6 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, case RTE_CRYPTO_AUTH_NULL: authdata->algtype = OP_PCL_IPSEC_HMAC_NULL; break; - case RTE_CRYPTO_AUTH_SHA224_HMAC: case RTE_CRYPTO_AUTH_SNOW3G_UIA2: case RTE_CRYPTO_AUTH_SHA1: case RTE_CRYPTO_AUTH_SHA256: @@ -3195,9 +3171,9 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, session->ctxt_type = DPAA2_SEC_IPSEC; if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { - uint8_t *hdr = NULL; - struct ip ip4_hdr; - struct rte_ipv6_hdr ip6_hdr; + uint8_t hdr[48] = {}; + struct rte_ipv4_hdr *ip4_hdr; + struct rte_ipv6_hdr *ip6_hdr; struct ipsec_encap_pdb encap_pdb; flc->dhr = SEC_FLC_DHR_OUTBOUND; @@ -3220,34 +3196,85 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | PDBOPTS_ESP_OIHI_PDB_INL | - PDBOPTS_ESP_IVSRC | PDBHMO_ESP_SNR; - if (ipsec_xform->options.dec_ttl) - encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL; + + if (ipsec_xform->options.iv_gen_disable == 0) + encap_pdb.options |= PDBOPTS_ESP_IVSRC; if (ipsec_xform->options.esn) encap_pdb.options |= PDBOPTS_ESP_ESN; + if (ipsec_xform->options.copy_dscp) + encap_pdb.options |= PDBOPTS_ESP_DIFFSERV; + if (ipsec_xform->options.ecn) + encap_pdb.options |= PDBOPTS_ESP_TECN; encap_pdb.spi = ipsec_xform->spi; session->dir = DIR_ENC; if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { - encap_pdb.ip_hdr_len = sizeof(struct ip); - ip4_hdr.ip_v = IPVERSION; - ip4_hdr.ip_hl = 5; - ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr)); - ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; - 
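Across these DPAA2 hunks the separate DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off) step is dropped because rte_pktmbuf_iova() already resolves to the IOVA of the first byte of packet data. A small sketch of that equivalence; the helper name is illustrative and not part of the patch:

#include <rte_mbuf.h>

/* rte_pktmbuf_iova() is the buffer IOVA with data_off already applied, so
 * only the per-op offset (e.g. aead.data.offset) needs to be added on top,
 * which is what the new single DPAA2_SET_FLE_ADDR() calls do. */
static uint64_t
fle_addr_for(const struct rte_mbuf *m, uint32_t op_data_offset)
{
	/* Same value the old two-step ADDR+OFFSET programming produced. */
	return (uint64_t)rte_pktmbuf_iova(m) + op_data_offset;
}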
ip4_hdr.ip_id = 0; - ip4_hdr.ip_off = 0; - ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; - ip4_hdr.ip_p = IPPROTO_ESP; - ip4_hdr.ip_sum = 0; - ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; - ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; - ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *) - &ip4_hdr, sizeof(struct ip)); - hdr = (uint8_t *)&ip4_hdr; + if (ipsec_xform->options.dec_ttl) + encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL; + if (ipsec_xform->options.copy_df) + encap_pdb.options |= PDBHMO_ESP_DFBIT; + ip4_hdr = (struct rte_ipv4_hdr *)hdr; + + encap_pdb.ip_hdr_len = sizeof(struct rte_ipv4_hdr); + ip4_hdr->version_ihl = RTE_IPV4_VHL_DEF; + ip4_hdr->time_to_live = ipsec_xform->tunnel.ipv4.ttl ? + ipsec_xform->tunnel.ipv4.ttl : 0x40; + ip4_hdr->type_of_service = (ipsec_xform->tunnel.ipv4.dscp<<2); + + ip4_hdr->hdr_checksum = 0; + ip4_hdr->packet_id = 0; + if (ipsec_xform->tunnel.ipv4.df) { + uint16_t frag_off = 0; + + frag_off |= RTE_IPV4_HDR_DF_FLAG; + ip4_hdr->fragment_offset = rte_cpu_to_be_16(frag_off); + } else + ip4_hdr->fragment_offset = 0; + + memcpy(&ip4_hdr->src_addr, &ipsec_xform->tunnel.ipv4.src_ip, + sizeof(struct in_addr)); + memcpy(&ip4_hdr->dst_addr, &ipsec_xform->tunnel.ipv4.dst_ip, + sizeof(struct in_addr)); + if (ipsec_xform->options.udp_encap) { + uint16_t sport, dport; + struct rte_udp_hdr *uh = + (struct rte_udp_hdr *) (hdr + + sizeof(struct rte_ipv4_hdr)); + + sport = ipsec_xform->udp.sport ? + ipsec_xform->udp.sport : 4500; + dport = ipsec_xform->udp.dport ? + ipsec_xform->udp.dport : 4500; + uh->src_port = rte_cpu_to_be_16(sport); + uh->dst_port = rte_cpu_to_be_16(dport); + uh->dgram_len = 0; + uh->dgram_cksum = 0; + + ip4_hdr->next_proto_id = IPPROTO_UDP; + ip4_hdr->total_length = + rte_cpu_to_be_16( + sizeof(struct rte_ipv4_hdr) + + sizeof(struct rte_udp_hdr)); + encap_pdb.ip_hdr_len += + sizeof(struct rte_udp_hdr); + encap_pdb.options |= + PDBOPTS_ESP_NAT | PDBOPTS_ESP_NUC; + } else { + ip4_hdr->total_length = + rte_cpu_to_be_16( + sizeof(struct rte_ipv4_hdr)); + ip4_hdr->next_proto_id = IPPROTO_ESP; + } + + ip4_hdr->hdr_checksum = calc_chksum((uint16_t *) + (void *)ip4_hdr, sizeof(struct rte_ipv4_hdr)); + } else if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6) { - ip6_hdr.vtc_flow = rte_cpu_to_be_32( + ip6_hdr = (struct rte_ipv6_hdr *)hdr; + + ip6_hdr->vtc_flow = rte_cpu_to_be_32( DPAA2_IPv6_DEFAULT_VTC_FLOW | ((ipsec_xform->tunnel.ipv6.dscp << RTE_IPV6_HDR_TC_SHIFT) & @@ -3256,18 +3283,17 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, RTE_IPV6_HDR_FL_SHIFT) & RTE_IPV6_HDR_FL_MASK)); /* Payload length will be updated by HW */ - ip6_hdr.payload_len = 0; - ip6_hdr.hop_limits = - ipsec_xform->tunnel.ipv6.hlimit; - ip6_hdr.proto = (ipsec_xform->proto == + ip6_hdr->payload_len = 0; + ip6_hdr->hop_limits = ipsec_xform->tunnel.ipv6.hlimit ? + ipsec_xform->tunnel.ipv6.hlimit : 0x40; + ip6_hdr->proto = (ipsec_xform->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP : IPPROTO_AH; - memcpy(&ip6_hdr.src_addr, + memcpy(&ip6_hdr->src_addr, &ipsec_xform->tunnel.ipv6.src_addr, 16); - memcpy(&ip6_hdr.dst_addr, + memcpy(&ip6_hdr->dst_addr, &ipsec_xform->tunnel.ipv6.dst_addr, 16); encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr); - hdr = (uint8_t *)&ip6_hdr; } bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, @@ -3294,12 +3320,22 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, break; } - decap_pdb.options = (ipsec_xform->tunnel.type == - RTE_SECURITY_IPSEC_TUNNEL_IPV4) ? 
- sizeof(struct ip) << 16 : - sizeof(struct rte_ipv6_hdr) << 16; + if (ipsec_xform->tunnel.type == + RTE_SECURITY_IPSEC_TUNNEL_IPV4) { + decap_pdb.options = sizeof(struct ip) << 16; + if (ipsec_xform->options.copy_df) + decap_pdb.options |= PDBHMO_ESP_DFV; + if (ipsec_xform->options.dec_ttl) + decap_pdb.options |= PDBHMO_ESP_DECAP_DTTL; + } else { + decap_pdb.options = sizeof(struct rte_ipv6_hdr) << 16; + } if (ipsec_xform->options.esn) decap_pdb.options |= PDBOPTS_ESP_ESN; + if (ipsec_xform->options.copy_dscp) + decap_pdb.options |= PDBOPTS_ESP_DIFFSERV; + if (ipsec_xform->options.ecn) + decap_pdb.options |= PDBOPTS_ESP_TECN; if (ipsec_xform->replay_win_sz) { uint32_t win_sz; @@ -3512,6 +3548,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, session->auth_key.data = NULL; session->auth_key.length = 0; session->auth_alg = 0; + authdata.algtype = PDCP_AUTH_TYPE_NULL; } authdata.key = (size_t)session->auth_key.data; authdata.keylen = session->auth_key.length; @@ -3537,12 +3574,20 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, session->auth_alg); goto out; } - p_authdata = &authdata; - } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { - DPAA2_SEC_ERR("Crypto: Integrity must for c-plane"); - goto out; + } else { + if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { + DPAA2_SEC_ERR("Crypto: Integrity must for c-plane"); + goto out; + } + session->auth_key.data = NULL; + session->auth_key.length = 0; + session->auth_alg = 0; } + authdata.key = (size_t)session->auth_key.data; + authdata.keylen = session->auth_key.length; + authdata.key_enc_flags = 0; + authdata.key_type = RTA_DATA_IMM; if (pdcp_xform->sdap_enabled) { int nb_keys_to_inline = @@ -4400,7 +4445,7 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused, else rta_set_sec_era(RTA_SEC_ERA_8); - DPAA2_SEC_INFO("2-SEC ERA is %d", rta_get_sec_era()); + DPAA2_SEC_INFO("2-SEC ERA is %d", USER_SEC_ERA(rta_get_sec_era())); /* Invoke PMD device initialization function */ retval = dpaa2_sec_dev_init(cryptodev); diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h index f84d2caf434..1c0bc3d6de8 100644 --- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h +++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
- * Copyright 2016,2020-2022 NXP + * Copyright 2016,2020-2023 NXP * */ @@ -878,7 +878,46 @@ static const struct rte_cryptodev_capabilities dpaa2_pdcp_capabilities[] = { }, } }, } }, - + { /* NULL (AUTH) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_NULL, + .block_size = 1, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + }, }, + }, }, + }, + { /* NULL (CIPHER) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_NULL, + .block_size = 1, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .iv_size = { + .min = 0, + .max = 0, + .increment = 0 + } + }, }, + }, } + }, RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() }; @@ -890,8 +929,14 @@ static const struct rte_security_capability dpaa2_sec_security_cap[] = { .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, - .options = { 0 }, - .replay_win_sz_max = 128 + .options = { + .udp_encap = 1, + .copy_df = 1, + .copy_dscp = 1, + .dec_ttl = 1, + .esn = 1, + }, + .replay_win_sz_max = 1024 }, .crypto_capabilities = dpaa2_sec_capabilities }, @@ -902,8 +947,15 @@ static const struct rte_security_capability dpaa2_sec_security_cap[] = { .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, - .options = { 0 }, - .replay_win_sz_max = 128 + .options = { + .iv_gen_disable = 1, + .udp_encap = 1, + .copy_df = 1, + .copy_dscp = 1, + .dec_ttl = 1, + .esn = 1, + }, + .replay_win_sz_max = 1024 }, .crypto_capabilities = dpaa2_sec_capabilities }, diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c index 36c79e450ab..4754b9d6f8b 100644 --- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c +++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c @@ -95,29 +95,25 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx, /* OOP */ if (dest_sgl) { /* Configure Output SGE for Encap/Decap */ - DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova); - DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head); + DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova + ofs.ofs.cipher.head); sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head; /* o/p segs */ for (i = 1; i < dest_sgl->num; i++) { sge++; DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = dest_sgl->vec[i].len; } sge->length -= ofs.ofs.cipher.tail; } else { /* Configure Output SGE for Encap/Decap */ - DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova); - DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head); + DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + ofs.ofs.cipher.head); sge->length = sgl->vec[0].len - ofs.ofs.cipher.head; /* o/p segs */ for (i = 1; i < sgl->num; i++) { sge++; DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = sgl->vec[i].len; } sge->length -= ofs.ofs.cipher.tail; @@ -148,14 +144,12 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx, sge->length = sess->iv.length; sge++; - DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova); - DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head); + DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + ofs.ofs.auth.head); sge->length = sgl->vec[0].len - ofs.ofs.auth.head; for (i = 1; i < sgl->num; i++) { sge++; DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = sgl->vec[i].len; } @@ 
-244,28 +238,24 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx, /* OOP */ if (dest_sgl) { /* Configure Output SGE for Encap/Decap */ - DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova); - DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head); + DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova + ofs.ofs.cipher.head); sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head; /* o/p segs */ for (i = 1; i < dest_sgl->num; i++) { sge++; DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = dest_sgl->vec[i].len; } } else { /* Configure Output SGE for Encap/Decap */ - DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova); - DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head); + DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + ofs.ofs.cipher.head); sge->length = sgl->vec[0].len - ofs.ofs.cipher.head; /* o/p segs */ for (i = 1; i < sgl->num; i++) { sge++; DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = sgl->vec[i].len; } } @@ -299,15 +289,13 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx, sge++; } - DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova); - DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head); + DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + ofs.ofs.cipher.head); sge->length = sgl->vec[0].len - ofs.ofs.cipher.head; /* i/p segs */ for (i = 1; i < sgl->num; i++) { sge++; DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = sgl->vec[i].len; } @@ -412,8 +400,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx, sge++; } /* i/p 1st seg */ - DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova); - DPAA2_SET_FLE_OFFSET(sge, data_offset); + DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + data_offset); if (data_len <= (int)(sgl->vec[0].len - data_offset)) { sge->length = data_len; @@ -423,7 +410,6 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx, for (i = 1; i < sgl->num; i++) { sge++; DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = sgl->vec[i].len; } } @@ -502,14 +488,12 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx, if (dest_sgl) { /* Configure Output SGE for Encap/Decap */ DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = dest_sgl->vec[0].len; out_len += sge->length; /* o/p segs */ for (i = 1; i < dest_sgl->num; i++) { sge++; DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = dest_sgl->vec[i].len; out_len += sge->length; } @@ -518,14 +502,12 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx, } else { /* Configure Output SGE for Encap/Decap */ DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = sgl->vec[0].len; out_len += sge->length; /* o/p segs */ for (i = 1; i < sgl->num; i++) { sge++; DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = sgl->vec[i].len; out_len += sge->length; } @@ -545,14 +527,12 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx, /* Configure input SGE for Encap/Decap */ DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = sgl->vec[0].len; in_len += sge->length; /* i/p segs */ for (i = 1; i < sgl->num; i++) { sge++; DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = sgl->vec[i].len; in_len += sge->length; } @@ -638,28 +618,24 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx, /* OOP */ if (dest_sgl) { /* o/p 1st seg */ - DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova); - DPAA2_SET_FLE_OFFSET(sge, data_offset); + DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova + data_offset); sge->length = dest_sgl->vec[0].len - 
data_offset; /* o/p segs */ for (i = 1; i < dest_sgl->num; i++) { sge++; DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = dest_sgl->vec[i].len; } } else { /* o/p 1st seg */ - DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova); - DPAA2_SET_FLE_OFFSET(sge, data_offset); + DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + data_offset); sge->length = sgl->vec[0].len - data_offset; /* o/p segs */ for (i = 1; i < sgl->num; i++) { sge++; DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = sgl->vec[i].len; } } @@ -678,21 +654,18 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx, /* i/p IV */ DPAA2_SET_FLE_ADDR(sge, iv->iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = sess->iv.length; sge++; /* i/p 1st seg */ - DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova); - DPAA2_SET_FLE_OFFSET(sge, data_offset); + DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + data_offset); sge->length = sgl->vec[0].len - data_offset; /* i/p segs */ for (i = 1; i < sgl->num; i++) { sge++; DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova); - DPAA2_SET_FLE_OFFSET(sge, 0); sge->length = sgl->vec[i].len; } DPAA2_SET_FLE_FIN(sge); diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c index 7d47c32693e..a301e8edb2a 100644 --- a/drivers/crypto/dpaa_sec/dpaa_sec.c +++ b/drivers/crypto/dpaa_sec/dpaa_sec.c @@ -666,7 +666,7 @@ dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp) if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) sess = SECURITY_GET_SESS_PRIV(op->sym->session); #endif @@ -677,7 +677,7 @@ dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp) cdb = &sess->cdb; rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb)); -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY printf("\nsession protocol type = %d\n", sess->proto_alg); #endif printf("\n****************************************\n" @@ -702,7 +702,7 @@ dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp) sess->iv.length, sess->iv.offset, sess->digest_length, sess->auth_only_len, sess->auth_cipher_text); -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY printf("PDCP session params:\n" "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:" "\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:" @@ -2817,6 +2817,15 @@ dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, "+++Using sha256-hmac truncated len is non-standard," "it will not work with lookaside proto"); break; + case RTE_CRYPTO_AUTH_SHA224_HMAC: + session->auth_key.algmode = OP_ALG_AAI_HMAC; + if (session->digest_length == 6) + session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_224_96; + else if (session->digest_length == 14) + session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_224_224; + else + session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_224_112; + break; case RTE_CRYPTO_AUTH_SHA384_HMAC: session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192; session->auth_key.algmode = OP_ALG_AAI_HMAC; @@ -2836,7 +2845,6 @@ dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96; session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC; break; - case RTE_CRYPTO_AUTH_SHA224_HMAC: case RTE_CRYPTO_AUTH_SNOW3G_UIA2: case RTE_CRYPTO_AUTH_SHA1: case RTE_CRYPTO_AUTH_SHA256: @@ -3188,6 +3196,11 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev, auth_xform->key.length); 
session->auth_alg = auth_xform->algo; } else { + if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { + DPAA_SEC_ERR("Crypto: Integrity must for c-plane"); + ret = -EINVAL; + goto out; + } session->auth_key.data = NULL; session->auth_key.length = 0; session->auth_alg = 0; diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h index 412a9da9429..eff6dcf311f 100644 --- a/drivers/crypto/dpaa_sec/dpaa_sec.h +++ b/drivers/crypto/dpaa_sec/dpaa_sec.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * - * Copyright 2016-2022 NXP + * Copyright 2016-2023 NXP * */ @@ -782,6 +782,46 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = { }, } }, } }, + { /* NULL (AUTH) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_NULL, + .block_size = 1, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + }, }, + }, }, + }, + { /* NULL (CIPHER) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_NULL, + .block_size = 1, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .iv_size = { + .min = 0, + .max = 0, + .increment = 0 + } + }, }, + }, } + }, RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() }; diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build index 3057e6fd102..87bf9655541 100644 --- a/drivers/crypto/ipsec_mb/meson.build +++ b/drivers/crypto/ipsec_mb/meson.build @@ -16,6 +16,11 @@ lib = cc.find_library('IPSec_MB', required: false) if not lib.found() build = false reason = 'missing dependency, "libIPSec_MB"' +# if the lib is found, check it's the right format +elif meson.version().version_compare('>=0.60') and not cc.links( + 'int main(void) {return 0;}', dependencies: lib) + build = false + reason = 'incompatible dependency, "libIPSec_MB"' else ext_deps += lib diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index 9e298023d7d..7f610659398 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -1438,6 +1438,54 @@ set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl, return 0; } +/** Check if conditions are met for digest-appended operations */ +static uint8_t * +aesni_mb_digest_appended_in_src(struct rte_crypto_op *op, IMB_JOB *job, + uint32_t oop) +{ + unsigned int auth_size, cipher_size; + uint8_t *end_cipher; + uint8_t *start_cipher; + + if (job->cipher_mode == IMB_CIPHER_NULL) + return NULL; + + if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3 || + job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN || + job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) { + cipher_size = (op->sym->cipher.data.offset >> 3) + + (op->sym->cipher.data.length >> 3); + } else { + cipher_size = (op->sym->cipher.data.offset) + + (op->sym->cipher.data.length); + } + if (job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN || + job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN || + job->hash_alg == IMB_AUTH_KASUMI_UIA1 || + job->hash_alg == IMB_AUTH_ZUC256_EIA3_BITLEN) { + auth_size = (op->sym->auth.data.offset >> 3) + + (op->sym->auth.data.length >> 3); + } else { + auth_size = (op->sym->auth.data.offset) + + (op->sym->auth.data.length); + } + + if (!oop) { + end_cipher = rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, cipher_size); + start_cipher = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); + } else 
{ + end_cipher = rte_pktmbuf_mtod_offset(op->sym->m_dst, uint8_t *, cipher_size); + start_cipher = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *); + } + + if (start_cipher < op->sym->auth.digest.data && + op->sym->auth.digest.data < end_cipher) { + return rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, auth_size); + } else { + return NULL; + } +} + /** * Process a crypto operation and complete a IMB_JOB job structure for * submission to the multi buffer library for processing. @@ -1580,9 +1628,12 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, } else { if (aead) job->auth_tag_output = op->sym->aead.digest.data; - else - job->auth_tag_output = op->sym->auth.digest.data; - + else { + job->auth_tag_output = aesni_mb_digest_appended_in_src(op, job, oop); + if (job->auth_tag_output == NULL) { + job->auth_tag_output = op->sym->auth.digest.data; + } + } if (session->auth.req_digest_len != job->auth_tag_output_len_in_bytes) { job->auth_tag_output = @@ -1917,6 +1968,7 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job) struct aesni_mb_session *sess = NULL; uint8_t *linear_buf = NULL; int sgl = 0; + uint8_t oop = 0; uint8_t is_docsis_sec = 0; if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { @@ -1962,8 +2014,54 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job) op->sym->auth.digest.data, sess->auth.req_digest_len, &op->status); - } else + } else { + if (!op->sym->m_dst || op->sym->m_dst == op->sym->m_src) { + /* in-place operation */ + oop = 0; + } else { /* out-of-place operation */ + oop = 1; + } + + /* Enable digest check */ + if (op->sym->m_src->nb_segs == 1 && op->sym->m_dst != NULL + && !is_aead_algo(job->hash_alg, sess->template_job.cipher_mode) && + aesni_mb_digest_appended_in_src(op, job, oop) != NULL) { + unsigned int auth_size, cipher_size; + int unencrypted_bytes = 0; + if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN || + job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN || + job->cipher_mode == IMB_CIPHER_ZUC_EEA3) { + cipher_size = (op->sym->cipher.data.offset >> 3) + + (op->sym->cipher.data.length >> 3); + } else { + cipher_size = (op->sym->cipher.data.offset) + + (op->sym->cipher.data.length); + } + if (job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN || + job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN || + job->hash_alg == IMB_AUTH_KASUMI_UIA1 || + job->hash_alg == IMB_AUTH_ZUC256_EIA3_BITLEN) { + auth_size = (op->sym->auth.data.offset >> 3) + + (op->sym->auth.data.length >> 3); + } else { + auth_size = (op->sym->auth.data.offset) + + (op->sym->auth.data.length); + } + /* Check for unencrypted bytes in partial digest cases */ + if (job->cipher_mode != IMB_CIPHER_NULL) { + unencrypted_bytes = auth_size + + job->auth_tag_output_len_in_bytes - cipher_size; + } + if (unencrypted_bytes > 0) + rte_memcpy( + rte_pktmbuf_mtod_offset(op->sym->m_dst, uint8_t *, + cipher_size), + rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, + cipher_size), + unencrypted_bytes); + } generate_digest(job, op, sess); + } break; default: op->status = RTE_CRYPTO_OP_STATUS_ERROR; @@ -2555,7 +2653,8 @@ RTE_INIT(ipsec_mb_register_aesni_mb) RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | - RTE_CRYPTODEV_FF_SECURITY; + RTE_CRYPTODEV_FF_SECURITY | + RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED; aesni_mb_data->internals_priv_size = 0; aesni_mb_data->ops = &aesni_mb_pmd_ops; diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c index 5e8624cebe1..c2348824177 100644 --- 
a/drivers/crypto/openssl/rte_openssl_pmd.c +++ b/drivers/crypto/openssl/rte_openssl_pmd.c @@ -2673,12 +2673,8 @@ process_openssl_sm2_op_evp(struct rte_crypto_op *cop, { EVP_PKEY_CTX *kctx = NULL, *sctx = NULL, *cctx = NULL; struct rte_crypto_asym_op *op = cop->asym; - OSSL_PARAM_BLD *param_bld = NULL; - OSSL_PARAM *params = NULL; + OSSL_PARAM *params = sess->u.sm2.params; EVP_PKEY *pkey = NULL; - BIGNUM *pkey_bn = NULL; - uint8_t pubkey[64]; - size_t len = 0; int ret = -1; cop->status = RTE_CRYPTO_OP_STATUS_ERROR; @@ -2686,50 +2682,6 @@ process_openssl_sm2_op_evp(struct rte_crypto_op *cop, if (cop->asym->sm2.k.data != NULL) goto err_sm2; - param_bld = OSSL_PARAM_BLD_new(); - if (!param_bld) { - OPENSSL_LOG(ERR, "failed to allocate params\n"); - goto err_sm2; - } - - ret = OSSL_PARAM_BLD_push_utf8_string(param_bld, - OSSL_PKEY_PARAM_GROUP_NAME, "SM2", 0); - if (!ret) { - OPENSSL_LOG(ERR, "failed to push params\n"); - goto err_sm2; - } - - pkey_bn = BN_bin2bn((const unsigned char *)op->sm2.pkey.data, - op->sm2.pkey.length, pkey_bn); - - memset(pubkey, 0, RTE_DIM(pubkey)); - pubkey[0] = 0x04; - len += 1; - memcpy(&pubkey[len], op->sm2.q.x.data, op->sm2.q.x.length); - len += op->sm2.q.x.length; - memcpy(&pubkey[len], op->sm2.q.y.data, op->sm2.q.y.length); - len += op->sm2.q.y.length; - - ret = OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_PRIV_KEY, - pkey_bn); - if (!ret) { - OPENSSL_LOG(ERR, "failed to push params\n"); - goto err_sm2; - } - - ret = OSSL_PARAM_BLD_push_octet_string(param_bld, - OSSL_PKEY_PARAM_PUB_KEY, pubkey, len); - if (!ret) { - OPENSSL_LOG(ERR, "failed to push params\n"); - goto err_sm2; - } - - params = OSSL_PARAM_BLD_to_param(param_bld); - if (!params) { - OPENSSL_LOG(ERR, "failed to push params\n"); - goto err_sm2; - } - switch (op->sm2.op_type) { case RTE_CRYPTO_ASYM_OP_ENCRYPT: { @@ -2940,9 +2892,6 @@ process_openssl_sm2_op_evp(struct rte_crypto_op *cop, if (pkey) EVP_PKEY_free(pkey); - if (param_bld) - OSSL_PARAM_BLD_free(param_bld); - return ret; } diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c index 85a4fa3e555..2862c294a93 100644 --- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c +++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c @@ -593,6 +593,23 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = { }, } }, + { /* SM2 */ + .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC, + {.asym = { + .xform_capa = { + .xform_type = RTE_CRYPTO_ASYM_XFORM_SM2, + .hash_algos = (1 << RTE_CRYPTO_AUTH_SM3), + .op_types = + ((1<sm2.hash != RTE_CRYPTO_AUTH_SM3) - return -1; - param_bld = OSSL_PARAM_BLD_new(); if (!param_bld) { OPENSSL_LOG(ERR, "failed to allocate params\n"); @@ -1306,6 +1323,38 @@ static int openssl_set_asym_session_parameters( goto err_sm2; } + ret = OSSL_PARAM_BLD_push_utf8_string(param_bld, + OSSL_PKEY_PARAM_GROUP_NAME, "SM2", 0); + if (!ret) { + OPENSSL_LOG(ERR, "failed to push params\n"); + goto err_sm2; + } + + pkey_bn = BN_bin2bn((const unsigned char *)xform->ec.pkey.data, + xform->ec.pkey.length, pkey_bn); + + ret = OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_PRIV_KEY, + pkey_bn); + if (!ret) { + OPENSSL_LOG(ERR, "failed to push params\n"); + goto err_sm2; + } + + memset(pubkey, 0, sizeof(pubkey)); + pubkey[0] = 0x04; + len += 1; + memcpy(&pubkey[len], xform->ec.q.x.data, xform->ec.q.x.length); + len += xform->ec.q.x.length; + memcpy(&pubkey[len], xform->ec.q.y.data, xform->ec.q.y.length); + len += xform->ec.q.y.length; + + ret = OSSL_PARAM_BLD_push_octet_string(param_bld, + 
OSSL_PKEY_PARAM_PUB_KEY, pubkey, len); + if (!ret) { + OPENSSL_LOG(ERR, "failed to push params\n"); + goto err_sm2; + } + params = OSSL_PARAM_BLD_to_param(param_bld); if (!params) { OPENSSL_LOG(ERR, "failed to push params\n"); diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c index d25e1b2f3aa..0a939161f99 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c @@ -637,6 +637,8 @@ qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx, struct icp_qat_fw_la_bulk_req *req; int32_t data_len; uint32_t tail = dp_ctx->tail; + struct rte_crypto_va_iova_ptr null_digest; + struct rte_crypto_va_iova_ptr *job_digest = digest; req = (struct icp_qat_fw_la_bulk_req *)( (uint8_t *)tx_queue->base_addr + tail); @@ -650,7 +652,12 @@ qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx, if (unlikely(data_len < 0)) return -1; - enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs, + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { + null_digest.iova = cookie->digest_null_phys_addr; + job_digest = &null_digest; + } + + enqueue_one_auth_job_gen3(ctx, cookie, req, job_digest, auth_iv, ofs, (uint32_t)data_len); dp_ctx->tail = tail; @@ -672,6 +679,8 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx, uint32_t tail; struct icp_qat_fw_la_bulk_req *req; int32_t data_len; + struct rte_crypto_va_iova_ptr null_digest; + struct rte_crypto_va_iova_ptr *job_digest = NULL; n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); if (unlikely(n == 0)) { @@ -704,7 +713,13 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx, if (unlikely(data_len < 0)) break; - enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i], + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { + null_digest.iova = cookie->digest_null_phys_addr; + job_digest = &null_digest; + } else + job_digest = &vec->digest[i]; + + enqueue_one_auth_job_gen3(ctx, cookie, req, job_digest, &vec->auth_iv[i], ofs, (uint32_t)data_len); tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; } diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h index cab7e214c0b..37647374d5d 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h @@ -10,6 +10,13 @@ #include "qat_sym_session.h" #include "qat_sym.h" +#define AES_OR_3DES_MISALIGNED (ctx->qat_mode == ICP_QAT_HW_CIPHER_CBC_MODE && \ + ((((ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128) || \ + (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES192) || \ + (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256)) && \ + (cipher_param->cipher_length % ICP_QAT_HW_AES_BLK_SZ)) || \ + ((ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) && \ + (cipher_param->cipher_length % ICP_QAT_HW_3DES_BLK_SZ)))) #define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \ RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n) @@ -704,6 +711,21 @@ enqueue_one_chain_job_gen1(struct qat_sym_session *ctx, auth_param->auth_off = ofs.ofs.auth.head; auth_param->auth_len = auth_len; auth_param->auth_res_addr = digest->iova; + /* Input cipher length alignment requirement for 3DES-CBC and AES-CBC. + * For 3DES-CBC cipher algo, ESP Payload size requires 8 Byte aligned. + * For AES-CBC cipher algo, ESP Payload size requires 16 Byte aligned. + * The alignment should be guaranteed by the ESP package padding field + * according to the RFC4303. 
Under this condition, QAT will pass through + * chain job as NULL cipher and NULL auth operation and report misalignment + * error detected. + */ + if (AES_OR_3DES_MISALIGNED) { + QAT_LOG(ERR, "Input cipher length alignment error detected.\n"); + ctx->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL; + ctx->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL; + cipher_param->cipher_length = 0; + auth_param->auth_len = 0; + } switch (ctx->qat_hash_alg) { case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c index 70938ba508c..e4bcfa59e74 100644 --- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c +++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c @@ -598,6 +598,8 @@ qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx, struct icp_qat_fw_la_bulk_req *req; int32_t data_len; uint32_t tail = dp_ctx->tail; + struct rte_crypto_va_iova_ptr null_digest; + struct rte_crypto_va_iova_ptr *job_digest = digest; req = (struct icp_qat_fw_la_bulk_req *)( (uint8_t *)tx_queue->base_addr + tail); @@ -611,8 +613,13 @@ qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx, if (unlikely(data_len < 0)) return -1; - enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs, - (uint32_t)data_len); + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { + null_digest.iova = cookie->digest_null_phys_addr; + job_digest = &null_digest; + } + + enqueue_one_auth_job_gen1(ctx, req, job_digest, auth_iv, ofs, + (uint32_t)data_len); dp_ctx->tail = tail; dp_ctx->cached_enqueue++; @@ -636,6 +643,8 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t tail; struct icp_qat_fw_la_bulk_req *req; int32_t data_len; + struct rte_crypto_va_iova_ptr null_digest; + struct rte_crypto_va_iova_ptr *job_digest = NULL; n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); if (unlikely(n == 0)) { @@ -668,7 +677,14 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx, if (unlikely(data_len < 0)) break; - enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i], + + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { + null_digest.iova = cookie->digest_null_phys_addr; + job_digest = &null_digest; + } else + job_digest = &vec->digest[i]; + + enqueue_one_auth_job_gen1(ctx, req, job_digest, &vec->auth_iv[i], ofs, (uint32_t)data_len); tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; @@ -703,6 +719,8 @@ qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx, struct icp_qat_fw_la_bulk_req *req; int32_t data_len; uint32_t tail = dp_ctx->tail; + struct rte_crypto_va_iova_ptr null_digest; + struct rte_crypto_va_iova_ptr *job_digest = digest; req = (struct icp_qat_fw_la_bulk_req *)( (uint8_t *)tx_queue->base_addr + tail); @@ -715,8 +733,13 @@ qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx, if (unlikely(data_len < 0)) return -1; + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { + null_digest.iova = cookie->digest_null_phys_addr; + job_digest = &null_digest; + } + if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs, - NULL, 0, cipher_iv, digest, auth_iv, ofs, + NULL, 0, cipher_iv, job_digest, auth_iv, ofs, (uint32_t)data_len))) return -1; @@ -743,6 +766,8 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t tail; struct icp_qat_fw_la_bulk_req *req; int32_t data_len; + struct rte_crypto_va_iova_ptr null_digest; + struct rte_crypto_va_iova_ptr *job_digest; n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); if 
(unlikely(n == 0)) { @@ -776,10 +801,16 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx, if (unlikely(data_len < 0)) break; + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { + null_digest.iova = cookie->digest_null_phys_addr; + job_digest = &null_digest; + } else + job_digest = &vec->digest[i]; + if (unlikely(enqueue_one_chain_job_gen1(ctx, req, vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0, - &vec->iv[i], &vec->digest[i], + &vec->iv[i], job_digest, &vec->auth_iv[i], ofs, (uint32_t)data_len))) break; diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c index 7abd5134235..0f196ace306 100644 --- a/drivers/crypto/qat/qat_asym.c +++ b/drivers/crypto/qat/qat_asym.c @@ -593,7 +593,7 @@ ecdsa_set_input(struct icp_qat_fw_pke_request *qat_req, qat_func_alignsize = RTE_ALIGN_CEIL(qat_function.bytesize, 8); - SET_PKE_9A_IN(asym_op->ecdsa.pkey, 0); + SET_PKE_9A_IN(xform->ec.pkey, 0); SET_PKE_9A_IN(asym_op->ecdsa.message, 1); SET_PKE_9A_IN(asym_op->ecdsa.k, 2); SET_PKE_9A_EC(curve[curve_id], b, 3); @@ -635,8 +635,8 @@ ecdsa_set_input(struct icp_qat_fw_pke_request *qat_req, SET_PKE_9A_EC(curve[curve_id], n, 7); SET_PKE_9A_EC(curve[curve_id], x, 6); SET_PKE_9A_EC(curve[curve_id], y, 5); - SET_PKE_9A_IN(asym_op->ecdsa.q.x, 4); - SET_PKE_9A_IN(asym_op->ecdsa.q.y, 3); + SET_PKE_9A_IN(xform->ec.q.x, 4); + SET_PKE_9A_IN(xform->ec.q.y, 3); SET_PKE_9A_EC(curve[curve_id], a, 2); SET_PKE_9A_EC(curve[curve_id], b, 1); SET_PKE_9A_EC(curve[curve_id], p, 0); diff --git a/drivers/crypto/scheduler/meson.build b/drivers/crypto/scheduler/meson.build index cd18efc7915..752d6554153 100644 --- a/drivers/crypto/scheduler/meson.build +++ b/drivers/crypto/scheduler/meson.build @@ -7,7 +7,7 @@ if is_windows subdir_done() endif -deps += ['bus_vdev', 'reorder'] +deps += ['bus_vdev', 'reorder', 'security'] sources = files( 'rte_cryptodev_scheduler.c', 'scheduler_failover.c', diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c index 258d6f8c433..9a21edd32a9 100644 --- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c +++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c @@ -5,11 +5,14 @@ #include #include #include +#include #include #include "rte_cryptodev_scheduler.h" #include "scheduler_pmd_private.h" +#define MAX_CAPS 256 + /** update the scheduler pmd's capability with attaching device's * capability. * For each device to be attached, the scheduler's capability should be @@ -59,7 +62,6 @@ sync_caps(struct rte_cryptodev_capabilities *caps, cap->sym.auth.digest_size.max ? 
s_cap->sym.auth.digest_size.max : cap->sym.auth.digest_size.max; - } if (s_cap->sym.xform_type == @@ -81,25 +83,173 @@ sync_caps(struct rte_cryptodev_capabilities *caps, memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap)); sync_nb_caps--; + i--; } return sync_nb_caps; } static int -update_scheduler_capability(struct scheduler_ctx *sched_ctx) +check_sec_cap_equal(const struct rte_security_capability *sec_cap1, + struct rte_security_capability *sec_cap2) +{ + if (sec_cap1->action != sec_cap2->action || + sec_cap1->protocol != sec_cap2->protocol || + sec_cap1->ol_flags != sec_cap2->ol_flags) + return 0; + + if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_DOCSIS) + return !memcmp(&sec_cap1->docsis, &sec_cap2->docsis, + sizeof(sec_cap1->docsis)); + else + return 0; +} + +static void +copy_sec_cap(struct rte_security_capability *dst_sec_cap, + struct rte_security_capability *src_sec_cap) +{ + dst_sec_cap->action = src_sec_cap->action; + dst_sec_cap->protocol = src_sec_cap->protocol; + if (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_DOCSIS) + dst_sec_cap->docsis = src_sec_cap->docsis; + dst_sec_cap->ol_flags = src_sec_cap->ol_flags; +} + +static uint32_t +sync_sec_crypto_caps(struct rte_cryptodev_capabilities *tmp_sec_crypto_caps, + const struct rte_cryptodev_capabilities *sec_crypto_caps, + const struct rte_cryptodev_capabilities *worker_sec_crypto_caps) +{ + uint8_t nb_caps = 0; + + nb_caps = sync_caps(tmp_sec_crypto_caps, nb_caps, sec_crypto_caps); + sync_caps(tmp_sec_crypto_caps, nb_caps, worker_sec_crypto_caps); + + return nb_caps; +} + +/** update the scheduler pmd's security capability with attaching device's + * security capability. + * For each device to be attached, the scheduler's security capability should + * be the common capability set of all workers + **/ +static uint32_t +sync_sec_caps(uint32_t worker_idx, + struct rte_security_capability *sec_caps, + struct rte_cryptodev_capabilities sec_crypto_caps[][MAX_CAPS], + uint32_t nb_sec_caps, + const struct rte_security_capability *worker_sec_caps) { - struct rte_cryptodev_capabilities tmp_caps[256] = { {0} }; - uint32_t nb_caps = 0, i; + uint32_t nb_worker_sec_caps = 0, i; + + if (worker_sec_caps == NULL) + return 0; + + while (worker_sec_caps[nb_worker_sec_caps].action != + RTE_SECURITY_ACTION_TYPE_NONE) + nb_worker_sec_caps++; + + /* Handle first worker */ + if (worker_idx == 0) { + uint32_t nb_worker_sec_crypto_caps = 0; + uint32_t nb_worker_supp_sec_caps = 0; + + for (i = 0; i < nb_worker_sec_caps; i++) { + /* Check for supported security protocols */ + if (!scheduler_check_sec_proto_supp(worker_sec_caps[i].action, + worker_sec_caps[i].protocol)) + continue; - if (sched_ctx->capabilities) { - rte_free(sched_ctx->capabilities); - sched_ctx->capabilities = NULL; + sec_caps[nb_worker_supp_sec_caps] = worker_sec_caps[i]; + + while (worker_sec_caps[i].crypto_capabilities[ + nb_worker_sec_crypto_caps].op != + RTE_CRYPTO_OP_TYPE_UNDEFINED) + nb_worker_sec_crypto_caps++; + + rte_memcpy(&sec_crypto_caps[nb_worker_supp_sec_caps][0], + &worker_sec_caps[i].crypto_capabilities[0], + sizeof(sec_crypto_caps[nb_worker_supp_sec_caps][0]) * + nb_worker_sec_crypto_caps); + + nb_worker_supp_sec_caps++; + } + return nb_worker_supp_sec_caps; } - for (i = 0; i < sched_ctx->nb_workers; i++) { - struct rte_cryptodev_info dev_info; + for (i = 0; i < nb_sec_caps; i++) { + struct rte_security_capability *sec_cap = &sec_caps[i]; + uint32_t j; + + for (j = 0; j < nb_worker_sec_caps; j++) { + struct rte_cryptodev_capabilities + tmp_sec_crypto_caps[MAX_CAPS] = { 
{0} }; + uint32_t nb_sec_crypto_caps = 0; + const struct rte_security_capability *worker_sec_cap = + &worker_sec_caps[j]; + + if (!check_sec_cap_equal(worker_sec_cap, sec_cap)) + continue; + + /* Sync the crypto caps of the common security cap */ + nb_sec_crypto_caps = sync_sec_crypto_caps( + tmp_sec_crypto_caps, + &sec_crypto_caps[i][0], + &worker_sec_cap->crypto_capabilities[0]); + + memset(&sec_crypto_caps[i][0], 0, + sizeof(sec_crypto_caps[i][0]) * MAX_CAPS); + + rte_memcpy(&sec_crypto_caps[i][0], + &tmp_sec_crypto_caps[0], + sizeof(sec_crypto_caps[i][0]) * nb_sec_crypto_caps); + + break; + } + + if (j < nb_worker_sec_caps) + continue; + + /* + * Remove an uncommon security cap, and it's associated crypto + * caps, from the arrays + */ + for (j = i; j < nb_sec_caps - 1; j++) { + rte_memcpy(&sec_caps[j], &sec_caps[j+1], + sizeof(*sec_cap)); + + rte_memcpy(&sec_crypto_caps[j][0], + &sec_crypto_caps[j+1][0], + sizeof(*&sec_crypto_caps[j][0]) * + MAX_CAPS); + } + memset(&sec_caps[nb_sec_caps - 1], 0, sizeof(*sec_cap)); + memset(&sec_crypto_caps[nb_sec_caps - 1][0], 0, + sizeof(*&sec_crypto_caps[nb_sec_caps - 1][0]) * + MAX_CAPS); + nb_sec_caps--; + i--; + } + + return nb_sec_caps; +} + +static int +update_scheduler_capability(struct scheduler_ctx *sched_ctx) +{ + struct rte_cryptodev_capabilities tmp_caps[MAX_CAPS] = { {0} }; + struct rte_security_capability tmp_sec_caps[MAX_CAPS] = { {0} }; + struct rte_cryptodev_capabilities + tmp_sec_crypto_caps[MAX_CAPS][MAX_CAPS] = { {{0}} }; + uint32_t nb_caps = 0, nb_sec_caps = 0, i; + struct rte_cryptodev_info dev_info; + + /* Free any previously allocated capability memory */ + scheduler_free_capabilities(sched_ctx); + /* Determine the new cryptodev capabilities for the scheduler */ + for (i = 0; i < sched_ctx->nb_workers; i++) { rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info); nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities); @@ -116,6 +266,54 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx) rte_memcpy(sched_ctx->capabilities, tmp_caps, sizeof(struct rte_cryptodev_capabilities) * nb_caps); + /* Determine the new security capabilities for the scheduler */ + for (i = 0; i < sched_ctx->nb_workers; i++) { + struct rte_cryptodev *dev = + &rte_cryptodevs[sched_ctx->workers[i].dev_id]; + struct rte_security_ctx *sec_ctx = dev->security_ctx; + + nb_sec_caps = sync_sec_caps(i, tmp_sec_caps, tmp_sec_crypto_caps, + nb_sec_caps, rte_security_capabilities_get(sec_ctx)); + } + + sched_ctx->sec_capabilities = rte_zmalloc_socket(NULL, + sizeof(struct rte_security_capability) * + (nb_sec_caps + 1), 0, SOCKET_ID_ANY); + if (!sched_ctx->sec_capabilities) + return -ENOMEM; + + sched_ctx->sec_crypto_capabilities = rte_zmalloc_socket(NULL, + sizeof(struct rte_cryptodev_capabilities *) * + (nb_sec_caps + 1), + 0, SOCKET_ID_ANY); + if (!sched_ctx->sec_crypto_capabilities) + return -ENOMEM; + + for (i = 0; i < nb_sec_caps; i++) { + uint16_t nb_sec_crypto_caps = 0; + + copy_sec_cap(&sched_ctx->sec_capabilities[i], &tmp_sec_caps[i]); + + while (tmp_sec_crypto_caps[i][nb_sec_crypto_caps].op != + RTE_CRYPTO_OP_TYPE_UNDEFINED) + nb_sec_crypto_caps++; + + sched_ctx->sec_crypto_capabilities[i] = + rte_zmalloc_socket(NULL, + sizeof(struct rte_cryptodev_capabilities) * + (nb_sec_crypto_caps + 1), 0, SOCKET_ID_ANY); + if (!sched_ctx->sec_crypto_capabilities[i]) + return -ENOMEM; + + rte_memcpy(sched_ctx->sec_crypto_capabilities[i], + &tmp_sec_crypto_caps[i][0], + sizeof(struct rte_cryptodev_capabilities) + * nb_sec_crypto_caps); + + 
sched_ctx->sec_capabilities[i].crypto_capabilities = + sched_ctx->sec_crypto_capabilities[i]; + } + return 0; } @@ -205,6 +403,7 @@ rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id) sched_ctx->nb_workers++; if (update_scheduler_capability(sched_ctx) < 0) { + scheduler_free_capabilities(sched_ctx); worker->dev_id = 0; worker->driver_id = 0; sched_ctx->nb_workers--; @@ -266,6 +465,7 @@ rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id) sched_ctx->nb_workers--; if (update_scheduler_capability(sched_ctx) < 0) { + scheduler_free_capabilities(sched_ctx); CR_SCHED_LOG(ERR, "capabilities update failed"); return -ENOTSUP; } diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h index 88da8368ec4..efb6a1cb67f 100644 --- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h +++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h @@ -24,7 +24,7 @@ extern "C" { #endif -/** Maximum number of bonded devices per device */ +/** Maximum number of bonding devices per device */ #ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS #define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS (8) #endif diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c index f24d2fc44b2..52ff2ffbb7f 100644 --- a/drivers/crypto/scheduler/scheduler_failover.c +++ b/drivers/crypto/scheduler/scheduler_failover.c @@ -28,7 +28,7 @@ failover_worker_enqueue(struct scheduler_worker *worker, { uint16_t processed_ops; - scheduler_set_worker_session(ops, nb_ops, index); + scheduler_set_worker_sessions(ops, nb_ops, index); processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id, worker->qp_id, ops, nb_ops); @@ -51,7 +51,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) ops, nb_ops, PRIMARY_WORKER_IDX); if (enqueued_ops < nb_ops) { - scheduler_retrieve_session(&ops[enqueued_ops], + scheduler_retrieve_sessions(&ops[enqueued_ops], nb_ops - enqueued_ops); enqueued_ops += failover_worker_enqueue( &qp_ctx->secondary_worker, @@ -59,7 +59,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) nb_ops - enqueued_ops, SECONDARY_WORKER_IDX); if (enqueued_ops < nb_ops) - scheduler_retrieve_session(&ops[enqueued_ops], + scheduler_retrieve_sessions(&ops[enqueued_ops], nb_ops - enqueued_ops); } @@ -102,7 +102,7 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) qp_ctx->deq_idx = (~qp_ctx->deq_idx) & WORKER_SWITCH_MASK; if (nb_deq_ops == nb_ops) - goto retrieve_session; + goto retrieve_sessions; worker = workers[qp_ctx->deq_idx]; @@ -112,8 +112,8 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) worker->nb_inflight_cops -= nb_deq_ops2; } -retrieve_session: - scheduler_retrieve_session(ops, nb_deq_ops + nb_deq_ops2); +retrieve_sessions: + scheduler_retrieve_sessions(ops, nb_deq_ops + nb_deq_ops2); return nb_deq_ops + nb_deq_ops2; } diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c index 3dea850661f..a21b522f9fa 100644 --- a/drivers/crypto/scheduler/scheduler_multicore.c +++ b/drivers/crypto/scheduler/scheduler_multicore.c @@ -183,7 +183,7 @@ mc_scheduler_worker(struct rte_cryptodev *dev) while (!mc_ctx->stop_signal) { if (pending_enq_ops) { - scheduler_set_worker_session( + scheduler_set_worker_sessions( &enq_ops[pending_enq_ops_idx], pending_enq_ops, worker_idx); processed_ops = @@ -192,7 +192,7 @@ mc_scheduler_worker(struct rte_cryptodev *dev) 
&enq_ops[pending_enq_ops_idx], pending_enq_ops); if (processed_ops < pending_deq_ops) - scheduler_retrieve_session( + scheduler_retrieve_sessions( &enq_ops[pending_enq_ops_idx + processed_ops], pending_deq_ops - processed_ops); @@ -203,13 +203,13 @@ mc_scheduler_worker(struct rte_cryptodev *dev) processed_ops = rte_ring_dequeue_burst(enq_ring, (void *)enq_ops, MC_SCHED_BUFFER_SIZE, NULL); if (processed_ops) { - scheduler_set_worker_session(enq_ops, + scheduler_set_worker_sessions(enq_ops, processed_ops, worker_idx); pending_enq_ops_idx = rte_cryptodev_enqueue_burst( worker->dev_id, worker->qp_id, enq_ops, processed_ops); if (pending_enq_ops_idx < processed_ops) - scheduler_retrieve_session( + scheduler_retrieve_sessions( enq_ops + pending_enq_ops_idx, processed_ops - pending_enq_ops_idx); @@ -229,7 +229,7 @@ mc_scheduler_worker(struct rte_cryptodev *dev) worker->dev_id, worker->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE); if (processed_ops) { - scheduler_retrieve_session(deq_ops, + scheduler_retrieve_sessions(deq_ops, processed_ops); inflight_ops -= processed_ops; if (reordering_enabled) { diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c index 0c51fff9300..30bb5ce0e2b 100644 --- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c +++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c @@ -59,7 +59,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) } for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) { - struct scheduler_session_ctx *sess_ctx[4]; uint8_t target[4]; uint32_t job_len[4]; @@ -76,17 +75,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) rte_prefetch0((uint8_t *)ops[i + 7]->sym->session + sizeof(struct rte_cryptodev_sym_session)); - sess_ctx[0] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i]->sym->session); - sess_ctx[1] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i + 1]->sym->session); - sess_ctx[2] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i + 2]->sym->session); - sess_ctx[3] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i + 3]->sym->session); - - /* job_len is initialized as cipher data length, once - * it is 0, equals to auth data length - */ - job_len[0] = ops[i]->sym->cipher.data.length; - job_len[0] += (ops[i]->sym->cipher.data.length == 0) * - ops[i]->sym->auth.data.length; + job_len[0] = scheduler_get_job_len(ops[i]); /* decide the target op based on the job length */ target[0] = !(job_len[0] & psd_qp_ctx->threshold); p_enq_op = &enq_ops[target[0]]; @@ -100,15 +89,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) break; } - if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) - ops[i]->sym->session = - sess_ctx[0]->worker_sess[target[0]]; + scheduler_set_single_worker_session(ops[i], target[0]); sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i]; p_enq_op->pos++; - job_len[1] = ops[i + 1]->sym->cipher.data.length; - job_len[1] += (ops[i + 1]->sym->cipher.data.length == 0) * - ops[i+1]->sym->auth.data.length; + job_len[1] = scheduler_get_job_len(ops[i + 1]); target[1] = !(job_len[1] & psd_qp_ctx->threshold); p_enq_op = &enq_ops[target[1]]; @@ -118,15 +103,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) break; } - if (ops[i + 1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) - ops[i + 1]->sym->session = - sess_ctx[1]->worker_sess[target[1]]; + scheduler_set_single_worker_session(ops[i + 1], target[1]); sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+1]; p_enq_op->pos++; - job_len[2] = ops[i + 2]->sym->cipher.data.length; - 
job_len[2] += (ops[i + 2]->sym->cipher.data.length == 0) * - ops[i + 2]->sym->auth.data.length; + job_len[2] = scheduler_get_job_len(ops[i + 2]); target[2] = !(job_len[2] & psd_qp_ctx->threshold); p_enq_op = &enq_ops[target[2]]; @@ -136,15 +117,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) break; } - if (ops[i + 2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) - ops[i + 2]->sym->session = - sess_ctx[2]->worker_sess[target[2]]; + scheduler_set_single_worker_session(ops[i + 2], target[2]); sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+2]; p_enq_op->pos++; - job_len[3] = ops[i + 3]->sym->cipher.data.length; - job_len[3] += (ops[i + 3]->sym->cipher.data.length == 0) * - ops[i + 3]->sym->auth.data.length; + job_len[3] = scheduler_get_job_len(ops[i + 3]); target[3] = !(job_len[3] & psd_qp_ctx->threshold); p_enq_op = &enq_ops[target[3]]; @@ -154,22 +131,16 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) break; } - if (ops[i + 3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) - ops[i + 3]->sym->session = - sess_ctx[3]->worker_sess[target[3]]; + scheduler_set_single_worker_session(ops[i + 3], target[3]); sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+3]; p_enq_op->pos++; } for (; i < nb_ops; i++) { - struct scheduler_session_ctx *sess_ctx = - CRYPTODEV_GET_SYM_SESS_PRIV(ops[i]->sym->session); uint32_t job_len; uint8_t target; - job_len = ops[i]->sym->cipher.data.length; - job_len += (ops[i]->sym->cipher.data.length == 0) * - ops[i]->sym->auth.data.length; + job_len = scheduler_get_job_len(ops[i]); target = !(job_len & psd_qp_ctx->threshold); p_enq_op = &enq_ops[target]; @@ -179,8 +150,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) break; } - if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) - ops[i]->sym->session = sess_ctx->worker_sess[target]; + scheduler_set_single_worker_session(ops[i], target); sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i]; p_enq_op->pos++; } @@ -236,7 +206,7 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) if (worker->nb_inflight_cops) { nb_deq_ops_pri = rte_cryptodev_dequeue_burst(worker->dev_id, worker->qp_id, ops, nb_ops); - scheduler_retrieve_session(ops, nb_deq_ops_pri); + scheduler_retrieve_sessions(ops, nb_deq_ops_pri); worker->nb_inflight_cops -= nb_deq_ops_pri; } @@ -251,7 +221,7 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) nb_deq_ops_sec = rte_cryptodev_dequeue_burst(worker->dev_id, worker->qp_id, &ops[nb_deq_ops_pri], nb_ops - nb_deq_ops_pri); - scheduler_retrieve_session(&ops[nb_deq_ops_pri], nb_deq_ops_sec); + scheduler_retrieve_sessions(&ops[nb_deq_ops_pri], nb_deq_ops_sec); worker->nb_inflight_cops -= nb_deq_ops_sec; if (!worker->nb_inflight_cops) diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c index 4e8bbf0e09c..589d092d745 100644 --- a/drivers/crypto/scheduler/scheduler_pmd.c +++ b/drivers/crypto/scheduler/scheduler_pmd.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -77,6 +78,23 @@ const struct scheduler_parse_map scheduler_ordering_map[] = { #define CDEV_SCHED_MODE_PARAM_SEP_CHAR ':' +static void +free_mem(struct rte_cryptodev *dev) +{ + struct scheduler_ctx *sched_ctx = dev->data->dev_private; + int i; + + for (i = 0; i < sched_ctx->nb_init_workers; i++) { + rte_free(sched_ctx->init_worker_names[i]); + sched_ctx->init_worker_names[i] = NULL; + } + + scheduler_free_capabilities(sched_ctx); + + 
rte_free(dev->security_ctx); + dev->security_ctx = NULL; +} + static int cryptodev_scheduler_create(const char *name, struct rte_vdev_device *vdev, @@ -206,8 +224,8 @@ cryptodev_scheduler_create(const char *name, if (!sched_ctx->init_worker_names[ sched_ctx->nb_init_workers]) { - CR_SCHED_LOG(ERR, "driver %s: Insufficient memory", - name); + CR_SCHED_LOG(ERR, "Not enough memory for init worker name"); + free_mem(dev); return -ENOMEM; } @@ -228,8 +246,38 @@ cryptodev_scheduler_create(const char *name, 0, SOCKET_ID_ANY); if (!sched_ctx->capabilities) { - CR_SCHED_LOG(ERR, "Not enough memory for capability " - "information"); + CR_SCHED_LOG(ERR, "Not enough memory for capability information"); + free_mem(dev); + return -ENOMEM; + } + + /* Initialize security context */ + struct rte_security_ctx *security_instance; + security_instance = rte_zmalloc_socket(NULL, + sizeof(struct rte_security_ctx), + RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); + if (!security_instance) { + CR_SCHED_LOG(ERR, "Not enough memory for security context"); + free_mem(dev); + return -ENOMEM; + } + + security_instance->device = dev; + security_instance->ops = rte_crypto_scheduler_pmd_sec_ops; + security_instance->sess_cnt = 0; + dev->security_ctx = security_instance; + + /* + * Initialize security capabilities structure as an empty structure, + * in case device information is requested when no workers are attached + */ + sched_ctx->sec_capabilities = rte_zmalloc_socket(NULL, + sizeof(struct rte_security_capability), + 0, SOCKET_ID_ANY); + + if (!sched_ctx->sec_capabilities) { + CR_SCHED_LOG(ERR, "Not enough memory for security capability information"); + free_mem(dev); return -ENOMEM; } @@ -263,6 +311,9 @@ cryptodev_scheduler_remove(struct rte_vdev_device *vdev) sched_ctx->workers[i].dev_id); } + rte_free(dev->security_ctx); + dev->security_ctx = NULL; + return rte_cryptodev_pmd_destroy(dev); } diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c index 294aab4452d..a18f7a08b0d 100644 --- a/drivers/crypto/scheduler/scheduler_pmd_ops.c +++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c @@ -8,11 +8,205 @@ #include #include #include +#include #include #include #include "scheduler_pmd_private.h" +struct scheduler_configured_sess_info { + uint8_t dev_id; + uint8_t driver_id; + union { + struct rte_cryptodev_sym_session *sess; + struct { + struct rte_security_session *sec_sess; + struct rte_security_ctx *sec_ctx; + }; + }; +}; + +static int +scheduler_session_create(void *sess, void *sess_params, + struct scheduler_ctx *sched_ctx, + enum rte_crypto_op_sess_type session_type) +{ + struct rte_mempool *mp = rte_mempool_from_obj(sess); + struct scheduler_session_ctx *sess_ctx; + struct scheduler_configured_sess_info configured_sess[ + RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}}; + uint32_t i, j, n_configured_sess = 0; + int ret = 0; + + if (session_type == RTE_CRYPTO_OP_WITH_SESSION) + sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess); + else + sess_ctx = SECURITY_GET_SESS_PRIV(sess); + + if (mp == NULL) + return -EINVAL; + + for (i = 0; i < sched_ctx->nb_workers; i++) { + struct scheduler_worker *worker = &sched_ctx->workers[i]; + struct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id]; + uint8_t next_worker = 0; + + for (j = 0; j < n_configured_sess; j++) { + if (configured_sess[j].driver_id == worker->driver_id) { + if (session_type == RTE_CRYPTO_OP_WITH_SESSION) + sess_ctx->worker_sess[i] = + configured_sess[j].sess; + else + sess_ctx->worker_sec_sess[i] = + 
configured_sess[j].sec_sess; + + next_worker = 1; + break; + } + } + if (next_worker) + continue; + + if (rte_mempool_avail_count(mp) == 0) { + ret = -ENOMEM; + goto error_exit; + } + + if (session_type == RTE_CRYPTO_OP_WITH_SESSION) { + struct rte_cryptodev_sym_session *worker_sess = + rte_cryptodev_sym_session_create(worker->dev_id, + sess_params, mp); + + if (worker_sess == NULL) { + ret = -rte_errno; + goto error_exit; + } + + worker_sess->opaque_data = (uint64_t)sess; + sess_ctx->worker_sess[i] = worker_sess; + configured_sess[n_configured_sess].sess = worker_sess; + } else { + struct rte_security_session *worker_sess = + rte_security_session_create(dev->security_ctx, + sess_params, mp); + + if (worker_sess == NULL) { + ret = -rte_errno; + goto error_exit; + } + + worker_sess->opaque_data = (uint64_t)sess; + sess_ctx->worker_sec_sess[i] = worker_sess; + configured_sess[n_configured_sess].sec_sess = + worker_sess; + configured_sess[n_configured_sess].sec_ctx = + dev->security_ctx; + } + + configured_sess[n_configured_sess].driver_id = + worker->driver_id; + configured_sess[n_configured_sess].dev_id = worker->dev_id; + n_configured_sess++; + } + + return 0; + +error_exit: + sess_ctx->ref_cnt = sched_ctx->ref_cnt; + for (i = 0; i < n_configured_sess; i++) { + if (session_type == RTE_CRYPTO_OP_WITH_SESSION) + rte_cryptodev_sym_session_free( + configured_sess[i].dev_id, + configured_sess[i].sess); + else + rte_security_session_destroy( + configured_sess[i].sec_ctx, + configured_sess[i].sec_sess); + } + + return ret; +} + +static void +scheduler_session_destroy(void *sess, struct scheduler_ctx *sched_ctx, + uint8_t session_type) +{ + struct scheduler_session_ctx *sess_ctx; + struct scheduler_configured_sess_info deleted_sess[ + RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}}; + uint32_t i, j, n_deleted_sess = 0; + + if (session_type == RTE_CRYPTO_OP_WITH_SESSION) + sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess); + else + sess_ctx = SECURITY_GET_SESS_PRIV(sess); + + if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) { + CR_SCHED_LOG(WARNING, + "Worker updated between session creation/deletion. 
" + "The session may not be freed fully."); + } + + for (i = 0; i < sched_ctx->nb_workers; i++) { + struct scheduler_worker *worker = &sched_ctx->workers[i]; + struct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id]; + uint8_t next_worker = 0; + + for (j = 0; j < n_deleted_sess; j++) { + if (deleted_sess[j].driver_id == worker->driver_id) { + if (session_type == RTE_CRYPTO_OP_WITH_SESSION) + sess_ctx->worker_sess[i] = NULL; + else + sess_ctx->worker_sec_sess[i] = NULL; + + next_worker = 1; + break; + } + } + if (next_worker) + continue; + + if (session_type == RTE_CRYPTO_OP_WITH_SESSION) { + rte_cryptodev_sym_session_free(worker->dev_id, + sess_ctx->worker_sess[i]); + sess_ctx->worker_sess[i] = NULL; + } else { + rte_security_session_destroy(dev->security_ctx, + sess_ctx->worker_sec_sess[i]); + sess_ctx->worker_sec_sess[i] = NULL; + } + + deleted_sess[n_deleted_sess++].driver_id = worker->driver_id; + } +} + +static unsigned int +scheduler_session_size_get(struct scheduler_ctx *sched_ctx, + uint8_t session_type) +{ + uint8_t i = 0; + uint32_t max_priv_sess_size = 0; + + /* Check what is the maximum private session size for all workers */ + for (i = 0; i < sched_ctx->nb_workers; i++) { + uint8_t worker_dev_id = sched_ctx->workers[i].dev_id; + struct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id]; + struct rte_security_ctx *sec_ctx = dev->security_ctx; + uint32_t priv_sess_size = 0; + + if (session_type == RTE_CRYPTO_OP_WITH_SESSION) { + priv_sess_size = + (*dev->dev_ops->sym_session_get_size)(dev); + } else { + priv_sess_size = (*sec_ctx->ops->session_get_size)(dev); + } + + max_priv_sess_size = RTE_MAX(max_priv_sess_size, priv_sess_size); + } + + return max_priv_sess_size; +} + /** attaching the workers predefined by scheduler's EAL options */ static int scheduler_attach_init_worker(struct rte_cryptodev *dev) @@ -265,10 +459,7 @@ scheduler_pmd_close(struct rte_cryptodev *dev) sched_ctx->private_ctx = NULL; } - if (sched_ctx->capabilities) { - rte_free(sched_ctx->capabilities); - sched_ctx->capabilities = NULL; - } + scheduler_free_capabilities(sched_ctx); return 0; } @@ -451,92 +642,21 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, } static uint32_t -scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) +scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; - uint8_t i = 0; - uint32_t max_priv_sess_size = 0; - - /* Check what is the maximum private session size for all workers */ - for (i = 0; i < sched_ctx->nb_workers; i++) { - uint8_t worker_dev_id = sched_ctx->workers[i].dev_id; - struct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id]; - uint32_t priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev); - if (max_priv_sess_size < priv_sess_size) - max_priv_sess_size = priv_sess_size; - } - - return max_priv_sess_size; + return scheduler_session_size_get(sched_ctx, RTE_CRYPTO_OP_WITH_SESSION); } -struct scheduler_configured_sess_info { - uint8_t dev_id; - uint8_t driver_id; - struct rte_cryptodev_sym_session *sess; -}; - static int scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; - struct rte_mempool *mp = rte_mempool_from_obj(sess); - struct scheduler_session_ctx *sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess); - struct scheduler_configured_sess_info configured_sess[ - RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}}; 
- uint32_t i, j, n_configured_sess = 0; - int ret = 0; - - if (mp == NULL) - return -EINVAL; - - for (i = 0; i < sched_ctx->nb_workers; i++) { - struct scheduler_worker *worker = &sched_ctx->workers[i]; - struct rte_cryptodev_sym_session *worker_sess; - uint8_t next_worker = 0; - - for (j = 0; j < n_configured_sess; j++) { - if (configured_sess[j].driver_id == - worker->driver_id) { - sess_ctx->worker_sess[i] = - configured_sess[j].sess; - next_worker = 1; - break; - } - } - if (next_worker) - continue; - - if (rte_mempool_avail_count(mp) == 0) { - ret = -ENOMEM; - goto error_exit; - } - - worker_sess = rte_cryptodev_sym_session_create(worker->dev_id, - xform, mp); - if (worker_sess == NULL) { - ret = -rte_errno; - goto error_exit; - } - worker_sess->opaque_data = (uint64_t)sess; - sess_ctx->worker_sess[i] = worker_sess; - configured_sess[n_configured_sess].driver_id = - worker->driver_id; - configured_sess[n_configured_sess].dev_id = worker->dev_id; - configured_sess[n_configured_sess].sess = worker_sess; - n_configured_sess++; - } - - return 0; -error_exit: - sess_ctx->ref_cnt = sched_ctx->ref_cnt; - for (i = 0; i < n_configured_sess; i++) - rte_cryptodev_sym_session_free(configured_sess[i].dev_id, - configured_sess[i].sess); - return ret; + return scheduler_session_create(sess, xform, sched_ctx, RTE_CRYPTO_OP_WITH_SESSION); } /** Clear the memory of session so it doesn't leave key material behind */ @@ -545,37 +665,8 @@ scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; - struct scheduler_session_ctx *sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess); - struct scheduler_configured_sess_info deleted_sess[ - RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}}; - uint32_t i, j, n_deleted_sess = 0; - - if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) { - CR_SCHED_LOG(WARNING, - "Worker updated between session creation/deletion. 
" - "The session may not be freed fully."); - } - - for (i = 0; i < sched_ctx->nb_workers; i++) { - struct scheduler_worker *worker = &sched_ctx->workers[i]; - uint8_t next_worker = 0; - for (j = 0; j < n_deleted_sess; j++) { - if (deleted_sess[j].driver_id == worker->driver_id) { - sess_ctx->worker_sess[i] = NULL; - next_worker = 1; - break; - } - } - if (next_worker) - continue; - - rte_cryptodev_sym_session_free(worker->dev_id, - sess_ctx->worker_sess[i]); - - deleted_sess[n_deleted_sess++].driver_id = worker->driver_id; - sess_ctx->worker_sess[i] = NULL; - } + scheduler_session_destroy(sess, sched_ctx, RTE_CRYPTO_OP_WITH_SESSION); } static struct rte_cryptodev_ops scheduler_pmd_ops = { @@ -598,3 +689,66 @@ static struct rte_cryptodev_ops scheduler_pmd_ops = { }; struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops; + +/** Configure a scheduler session from a security session configuration */ +static int +scheduler_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf, + struct rte_security_session *sess) +{ + struct rte_cryptodev *cdev = dev; + struct scheduler_ctx *sched_ctx = cdev->data->dev_private; + + /* Check for supported security protocols */ + if (!scheduler_check_sec_proto_supp(conf->action_type, conf->protocol)) { + CR_SCHED_LOG(ERR, "Unsupported security protocol"); + return -ENOTSUP; + } + + return scheduler_session_create(sess, conf, sched_ctx, RTE_CRYPTO_OP_SECURITY_SESSION); +} + +/** Clear the memory of session so it doesn't leave key material behind */ +static int +scheduler_pmd_sec_sess_destroy(void *dev, + struct rte_security_session *sess) +{ + struct rte_cryptodev *cdev = dev; + struct scheduler_ctx *sched_ctx = cdev->data->dev_private; + + scheduler_session_destroy(sess, sched_ctx, RTE_CRYPTO_OP_SECURITY_SESSION); + + return 0; +} + +/** Get sync security capabilities for scheduler pmds */ +static const struct rte_security_capability * +scheduler_pmd_sec_capa_get(void *dev) +{ + struct rte_cryptodev *cdev = dev; + struct scheduler_ctx *sched_ctx = cdev->data->dev_private; + + return sched_ctx->sec_capabilities; +} + +static unsigned int +scheduler_pmd_sec_sess_size_get(void *dev) +{ + struct rte_cryptodev *cdev = dev; + struct scheduler_ctx *sched_ctx = cdev->data->dev_private; + + return scheduler_session_size_get(sched_ctx, + RTE_CRYPTO_OP_SECURITY_SESSION); +} + +static struct rte_security_ops scheduler_pmd_sec_ops = { + .session_create = scheduler_pmd_sec_sess_create, + .session_update = NULL, + .session_get_size = scheduler_pmd_sec_sess_size_get, + .session_stats_get = NULL, + .session_destroy = scheduler_pmd_sec_sess_destroy, + .set_pkt_metadata = NULL, + .capabilities_get = scheduler_pmd_sec_capa_get +}; + +struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops = + &scheduler_pmd_sec_ops; diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h index 36d0bb6307e..26110277a4f 100644 --- a/drivers/crypto/scheduler/scheduler_pmd_private.h +++ b/drivers/crypto/scheduler/scheduler_pmd_private.h @@ -5,6 +5,8 @@ #ifndef _SCHEDULER_PMD_PRIVATE_H #define _SCHEDULER_PMD_PRIVATE_H +#include + #include "rte_cryptodev_scheduler.h" #define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler @@ -30,7 +32,8 @@ struct scheduler_ctx { /**< private scheduler context pointer */ struct rte_cryptodev_capabilities *capabilities; - uint32_t nb_capabilities; + struct rte_security_capability *sec_capabilities; + struct rte_cryptodev_capabilities **sec_crypto_capabilities; uint32_t 
max_nb_queue_pairs; @@ -64,8 +67,12 @@ struct scheduler_qp_ctx { struct scheduler_session_ctx { uint32_t ref_cnt; - struct rte_cryptodev_sym_session *worker_sess[ - RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]; + union { + struct rte_cryptodev_sym_session *worker_sess[ + RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]; + struct rte_security_session *worker_sec_sess[ + RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]; + }; }; extern uint8_t cryptodev_scheduler_driver_id; @@ -108,7 +115,22 @@ scheduler_order_drain(struct rte_ring *order_ring, } static __rte_always_inline void -scheduler_set_worker_session(struct rte_crypto_op **ops, uint16_t nb_ops, +scheduler_set_single_worker_session(struct rte_crypto_op *op, + uint8_t worker_idx) +{ + if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { + struct scheduler_session_ctx *sess_ctx = + CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); + op->sym->session = sess_ctx->worker_sess[worker_idx]; + } else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { + struct scheduler_session_ctx *sess_ctx = + SECURITY_GET_SESS_PRIV(op->sym->session); + op->sym->session = sess_ctx->worker_sec_sess[worker_idx]; + } +} + +static __rte_always_inline void +scheduler_set_worker_sessions(struct rte_crypto_op **ops, uint16_t nb_ops, uint8_t worker_index) { struct rte_crypto_op **op = ops; @@ -129,52 +151,34 @@ scheduler_set_worker_session(struct rte_crypto_op **ops, uint16_t nb_ops, rte_prefetch0(op[7]->sym->session); } - if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { - struct scheduler_session_ctx *sess_ctx = - CRYPTODEV_GET_SYM_SESS_PRIV(op[0]->sym->session); - op[0]->sym->session = - sess_ctx->worker_sess[worker_index]; - } - - if (op[1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { - struct scheduler_session_ctx *sess_ctx = - CRYPTODEV_GET_SYM_SESS_PRIV(op[1]->sym->session); - op[1]->sym->session = - sess_ctx->worker_sess[worker_index]; - } - - if (op[2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { - struct scheduler_session_ctx *sess_ctx = - CRYPTODEV_GET_SYM_SESS_PRIV(op[2]->sym->session); - op[2]->sym->session = - sess_ctx->worker_sess[worker_index]; - } - - if (op[3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { - struct scheduler_session_ctx *sess_ctx = - CRYPTODEV_GET_SYM_SESS_PRIV(op[3]->sym->session); - op[3]->sym->session = - sess_ctx->worker_sess[worker_index]; - } + scheduler_set_single_worker_session(op[0], worker_index); + scheduler_set_single_worker_session(op[1], worker_index); + scheduler_set_single_worker_session(op[2], worker_index); + scheduler_set_single_worker_session(op[3], worker_index); op += 4; n -= 4; } while (n--) { - if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { - struct scheduler_session_ctx *sess_ctx = - CRYPTODEV_GET_SYM_SESS_PRIV(op[0]->sym->session); - - op[0]->sym->session = - sess_ctx->worker_sess[worker_index]; - op++; - } + scheduler_set_single_worker_session(op[0], worker_index); + op++; } } static __rte_always_inline void -scheduler_retrieve_session(struct rte_crypto_op **ops, uint16_t nb_ops) +scheduler_retrieve_single_session(struct rte_crypto_op *op) +{ + if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) + op->sym->session = (void *)(uintptr_t) + rte_cryptodev_sym_session_opaque_data_get(op->sym->session); + else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) + op->sym->session = (void *)(uintptr_t) + rte_security_session_opaque_data_get(op->sym->session); +} + +static __rte_always_inline void +scheduler_retrieve_sessions(struct rte_crypto_op **ops, uint16_t nb_ops) { uint16_t n = nb_ops; struct rte_crypto_op **op = ops; @@ 
-194,32 +198,73 @@ scheduler_retrieve_session(struct rte_crypto_op **ops, uint16_t nb_ops) rte_prefetch0(op[7]->sym->session); } - if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) - op[0]->sym->session = (void *)(uintptr_t) - rte_cryptodev_sym_session_opaque_data_get(op[0]->sym->session); - if (op[1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) - op[1]->sym->session = (void *)(uintptr_t) - rte_cryptodev_sym_session_opaque_data_get(op[1]->sym->session); - if (op[2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) - op[2]->sym->session = (void *)(uintptr_t) - rte_cryptodev_sym_session_opaque_data_get(op[2]->sym->session); - if (op[3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) - op[3]->sym->session = (void *)(uintptr_t) - rte_cryptodev_sym_session_opaque_data_get(op[3]->sym->session); + scheduler_retrieve_single_session(op[0]); + scheduler_retrieve_single_session(op[1]); + scheduler_retrieve_single_session(op[2]); + scheduler_retrieve_single_session(op[3]); op += 4; n -= 4; } while (n--) { - if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) - op[0]->sym->session = (void *)(uintptr_t) - rte_cryptodev_sym_session_opaque_data_get(op[0]->sym->session); + scheduler_retrieve_single_session(op[0]); op++; } } +static __rte_always_inline uint32_t +scheduler_get_job_len(struct rte_crypto_op *op) +{ + uint32_t job_len; + + /* op_len is initialized as cipher data length, if + * it is 0, then it is set to auth data length + */ + job_len = op->sym->cipher.data.length; + job_len += (op->sym->cipher.data.length == 0) * + op->sym->auth.data.length; + + return job_len; +} + +static __rte_always_inline void +scheduler_free_capabilities(struct scheduler_ctx *sched_ctx) +{ + uint32_t i; + + rte_free(sched_ctx->capabilities); + sched_ctx->capabilities = NULL; + + if (sched_ctx->sec_crypto_capabilities) { + i = 0; + while (sched_ctx->sec_crypto_capabilities[i] != NULL) { + rte_free(sched_ctx->sec_crypto_capabilities[i]); + sched_ctx->sec_crypto_capabilities[i] = NULL; + i++; + } + + rte_free(sched_ctx->sec_crypto_capabilities); + sched_ctx->sec_crypto_capabilities = NULL; + } + + rte_free(sched_ctx->sec_capabilities); + sched_ctx->sec_capabilities = NULL; +} + +static __rte_always_inline int +scheduler_check_sec_proto_supp(enum rte_security_session_action_type action, + enum rte_security_session_protocol protocol) +{ + if (action == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL && + protocol == RTE_SECURITY_PROTOCOL_DOCSIS) + return 1; + + return 0; +} + /** device specific operations function pointer structure */ extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops; +extern struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops; #endif /* _SCHEDULER_PMD_PRIVATE_H */ diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c index ad3f8b842aa..08041887a82 100644 --- a/drivers/crypto/scheduler/scheduler_roundrobin.c +++ b/drivers/crypto/scheduler/scheduler_roundrobin.c @@ -28,11 +28,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) if (unlikely(nb_ops == 0)) return 0; - scheduler_set_worker_session(ops, nb_ops, worker_idx); + scheduler_set_worker_sessions(ops, nb_ops, worker_idx); processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id, worker->qp_id, ops, nb_ops); if (processed_ops < nb_ops) - scheduler_retrieve_session(ops + processed_ops, + scheduler_retrieve_sessions(ops + processed_ops, nb_ops - processed_ops); worker->nb_inflight_cops += processed_ops; @@ -87,7 +87,7 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, 
uint16_t nb_ops) nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id, worker->qp_id, ops, nb_ops); - scheduler_retrieve_session(ops, nb_deq_ops); + scheduler_retrieve_sessions(ops, nb_deq_ops); last_worker_idx += 1; last_worker_idx %= rr_qp_ctx->nb_workers; diff --git a/drivers/crypto/virtio/virtio_pci.c b/drivers/crypto/virtio/virtio_pci.c index 95a43c8801c..eca8a2a69da 100644 --- a/drivers/crypto/virtio/virtio_pci.c +++ b/drivers/crypto/virtio/virtio_pci.c @@ -14,15 +14,6 @@ #include "virtio_pci.h" #include "virtqueue.h" -/* - * Following macros are derived from linux/pci_regs.h, however, - * we can't simply include that header here, as there is no such - * file for non-Linux platform. - */ -#define PCI_CAPABILITY_LIST 0x34 -#define PCI_CAP_ID_VNDR 0x09 -#define PCI_CAP_ID_MSIX 0x11 - /* * The remaining space is defined by each driver as the per-driver * configuration space. @@ -338,13 +329,12 @@ get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap) return base + offset; } -#define PCI_MSIX_ENABLE 0x8000 - static int virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw) { - uint8_t pos; struct virtio_pci_cap cap; + uint16_t flags; + off_t pos; int ret; if (rte_pci_map_device(dev)) { @@ -352,44 +342,28 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw) return -1; } - ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST); - if (ret < 0) { - VIRTIO_CRYPTO_INIT_LOG_DBG("failed to read pci capability list"); - return -1; + /* + * Transitional devices would also have this capability, + * that's why we also check if msix is enabled. + */ + pos = rte_pci_find_capability(dev, RTE_PCI_CAP_ID_MSIX); + if (pos > 0 && rte_pci_read_config(dev, &flags, sizeof(flags), + pos + RTE_PCI_MSIX_FLAGS) == sizeof(flags)) { + if (flags & RTE_PCI_MSIX_FLAGS_ENABLE) + hw->use_msix = VIRTIO_MSIX_ENABLED; + else + hw->use_msix = VIRTIO_MSIX_DISABLED; + } else { + hw->use_msix = VIRTIO_MSIX_NONE; } - while (pos) { - ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos); - if (ret < 0) { - VIRTIO_CRYPTO_INIT_LOG_ERR( - "failed to read pci cap at pos: %x", pos); + pos = rte_pci_find_capability(dev, RTE_PCI_CAP_ID_VNDR); + while (pos > 0) { + if (rte_pci_read_config(dev, &cap, sizeof(cap), pos) != sizeof(cap)) break; - } - - if (cap.cap_vndr == PCI_CAP_ID_MSIX) { - /* Transitional devices would also have this capability, - * that's why we also check if msix is enabled. - * 1st byte is cap ID; 2nd byte is the position of next - * cap; next two bytes are the flags. 
- */ - uint16_t flags = ((uint16_t *)&cap)[1]; - - if (flags & PCI_MSIX_ENABLE) - hw->use_msix = VIRTIO_MSIX_ENABLED; - else - hw->use_msix = VIRTIO_MSIX_DISABLED; - } - - if (cap.cap_vndr != PCI_CAP_ID_VNDR) { - VIRTIO_CRYPTO_INIT_LOG_DBG( - "[%2x] skipping non VNDR cap id: %02x", - pos, cap.cap_vndr); - goto next; - } - VIRTIO_CRYPTO_INIT_LOG_DBG( "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u", - pos, cap.cfg_type, cap.bar, cap.offset, cap.length); + (unsigned int)pos, cap.cfg_type, cap.bar, cap.offset, cap.length); switch (cap.cfg_type) { case VIRTIO_PCI_CAP_COMMON_CFG: @@ -412,8 +386,7 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw) break; } -next: - pos = cap.cap_next; + pos = rte_pci_find_next_capability(dev, RTE_PCI_CAP_ID_VNDR, pos); } if (hw->common_cfg == NULL || hw->notify_base == NULL || diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c index a6f4a31e0e3..26680edfdea 100644 --- a/drivers/dma/cnxk/cnxk_dmadev.c +++ b/drivers/dma/cnxk/cnxk_dmadev.c @@ -2,73 +2,122 @@ * Copyright (C) 2021 Marvell International Ltd. */ -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include #include +static int cnxk_stats_reset(struct rte_dma_dev *dev, uint16_t vchan); + static int -cnxk_dmadev_info_get(const struct rte_dma_dev *dev, - struct rte_dma_info *dev_info, uint32_t size) +cnxk_dmadev_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info, uint32_t size) { - RTE_SET_USED(dev); + struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private; RTE_SET_USED(size); - dev_info->max_vchans = 1; - dev_info->nb_vchans = 1; - dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | - RTE_DMA_CAPA_MEM_TO_DEV | RTE_DMA_CAPA_DEV_TO_MEM | - RTE_DMA_CAPA_DEV_TO_DEV | RTE_DMA_CAPA_OPS_COPY | - RTE_DMA_CAPA_OPS_COPY_SG; - dev_info->max_desc = DPI_MAX_DESC; - dev_info->min_desc = 1; - dev_info->max_sges = DPI_MAX_POINTER; + dev_info->max_vchans = CNXK_DPI_MAX_VCHANS_PER_QUEUE; + dev_info->nb_vchans = dpivf->num_vchans; + dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_MEM_TO_DEV | + RTE_DMA_CAPA_DEV_TO_MEM | RTE_DMA_CAPA_DEV_TO_DEV | + RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_COPY_SG; + dev_info->max_desc = CNXK_DPI_MAX_DESC; + dev_info->min_desc = CNXK_DPI_MIN_DESC; + dev_info->max_sges = CNXK_DPI_MAX_POINTER; return 0; } static int -cnxk_dmadev_configure(struct rte_dma_dev *dev, - const struct rte_dma_conf *conf, uint32_t conf_sz) +cnxk_dmadev_vchan_free(struct cnxk_dpi_vf_s *dpivf, uint16_t vchan) { + struct cnxk_dpi_conf *dpi_conf; + uint16_t num_vchans; + uint16_t max_desc; + int i, j; + + if (vchan == RTE_DMA_ALL_VCHAN) { + num_vchans = dpivf->num_vchans; + i = 0; + } else { + if (vchan >= CNXK_DPI_MAX_VCHANS_PER_QUEUE) + return -EINVAL; + + num_vchans = vchan + 1; + i = vchan; + } + + for (; i < num_vchans; i++) { + dpi_conf = &dpivf->conf[i]; + max_desc = dpi_conf->c_desc.max_cnt + 1; + if (dpi_conf->c_desc.compl_ptr) { + for (j = 0; j < max_desc; j++) + rte_free(dpi_conf->c_desc.compl_ptr[j]); + } + + rte_free(dpi_conf->c_desc.compl_ptr); + dpi_conf->c_desc.compl_ptr = NULL; + } + + return 0; +} + +static int +cnxk_dmadev_chunk_pool_create(struct rte_dma_dev *dev, uint32_t nb_chunks, uint32_t chunk_sz) +{ + char pool_name[RTE_MEMPOOL_NAMESIZE]; struct cnxk_dpi_vf_s *dpivf = NULL; - int rc = 0; + int rc; - RTE_SET_USED(conf); - RTE_SET_USED(conf); - RTE_SET_USED(conf_sz); - RTE_SET_USED(conf_sz); dpivf = dev->fp_obj->dev_private; - rc = roc_dpi_configure(&dpivf->rdpi); - 
if (rc < 0) - plt_err("DMA configure failed err = %d", rc); + /* Create chunk pool. */ + snprintf(pool_name, sizeof(pool_name), "cnxk_dma_chunk_pool%d", dev->data->dev_id); + + nb_chunks += (CNXK_DPI_POOL_MAX_CACHE_SZ * rte_lcore_count()); + dpivf->chunk_pool = rte_mempool_create_empty( + pool_name, nb_chunks, chunk_sz, CNXK_DPI_POOL_MAX_CACHE_SZ, 0, rte_socket_id(), 0); + + if (dpivf->chunk_pool == NULL) { + plt_err("Unable to create chunkpool."); + return -ENOMEM; + } + rc = rte_mempool_set_ops_byname(dpivf->chunk_pool, rte_mbuf_platform_mempool_ops(), NULL); + if (rc < 0) { + plt_err("Unable to set chunkpool ops"); + goto free; + } + + rc = rte_mempool_populate_default(dpivf->chunk_pool); + if (rc < 0) { + plt_err("Unable to set populate chunkpool."); + goto free; + } + dpivf->aura = roc_npa_aura_handle_to_aura(dpivf->chunk_pool->pool_id); + + return 0; + +free: + rte_mempool_free(dpivf->chunk_pool); return rc; } static int -cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, - const struct rte_dma_vchan_conf *conf, - uint32_t conf_sz) +cnxk_dmadev_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf, uint32_t conf_sz) { - struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private; - struct cnxk_dpi_compl_s *comp_data; - union dpi_instr_hdr_s *header = &dpivf->conf.hdr; - int i; + struct cnxk_dpi_vf_s *dpivf = NULL; - RTE_SET_USED(vchan); RTE_SET_USED(conf_sz); + dpivf = dev->fp_obj->dev_private; + + /* After config function, vchan setup function has to be called. + * Free up vchan memory if any, before configuring num_vchans. + */ + cnxk_dmadev_vchan_free(dpivf, RTE_DMA_ALL_VCHAN); + dpivf->num_vchans = conf->nb_vchans; + return 0; +} + +static void +cn9k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vchan_conf *conf) +{ header->cn9k.pt = DPI_HDR_PT_ZBW_CA; switch (conf->direction) { @@ -76,13 +125,21 @@ cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, header->cn9k.xtype = DPI_XTYPE_INBOUND; header->cn9k.lport = conf->src_port.pcie.coreid; header->cn9k.fport = 0; - header->cn9k.pvfe = 1; + header->cn9k.pvfe = conf->src_port.pcie.vfen; + if (header->cn9k.pvfe) { + header->cn9k.func = conf->src_port.pcie.pfid << 12; + header->cn9k.func |= conf->src_port.pcie.vfid; + } break; case RTE_DMA_DIR_MEM_TO_DEV: header->cn9k.xtype = DPI_XTYPE_OUTBOUND; header->cn9k.lport = 0; header->cn9k.fport = conf->dst_port.pcie.coreid; - header->cn9k.pvfe = 1; + header->cn9k.pvfe = conf->dst_port.pcie.vfen; + if (header->cn9k.pvfe) { + header->cn9k.func = conf->dst_port.pcie.pfid << 12; + header->cn9k.func |= conf->dst_port.pcie.vfid; + } break; case RTE_DMA_DIR_MEM_TO_MEM: header->cn9k.xtype = DPI_XTYPE_INTERNAL_ONLY; @@ -94,37 +151,13 @@ cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, header->cn9k.xtype = DPI_XTYPE_EXTERNAL_ONLY; header->cn9k.lport = conf->src_port.pcie.coreid; header->cn9k.fport = conf->dst_port.pcie.coreid; + header->cn9k.pvfe = 0; }; - - for (i = 0; i < conf->nb_desc; i++) { - comp_data = rte_zmalloc(NULL, sizeof(*comp_data), 0); - if (comp_data == NULL) { - plt_err("Failed to allocate for comp_data"); - return -ENOMEM; - } - comp_data->cdata = DPI_REQ_CDATA; - dpivf->conf.c_desc.compl_ptr[i] = comp_data; - }; - dpivf->conf.c_desc.max_cnt = DPI_MAX_DESC; - dpivf->conf.c_desc.head = 0; - dpivf->conf.c_desc.tail = 0; - - return 0; } -static int -cn10k_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, - const struct rte_dma_vchan_conf *conf, - uint32_t conf_sz) +static void 
+cn10k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vchan_conf *conf) { - struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private; - struct cnxk_dpi_compl_s *comp_data; - union dpi_instr_hdr_s *header = &dpivf->conf.hdr; - int i; - - RTE_SET_USED(vchan); - RTE_SET_USED(conf_sz); - header->cn10k.pt = DPI_HDR_PT_ZBW_CA; switch (conf->direction) { @@ -132,13 +165,21 @@ cn10k_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, header->cn10k.xtype = DPI_XTYPE_INBOUND; header->cn10k.lport = conf->src_port.pcie.coreid; header->cn10k.fport = 0; - header->cn10k.pvfe = 1; + header->cn10k.pvfe = conf->src_port.pcie.vfen; + if (header->cn10k.pvfe) { + header->cn10k.func = conf->src_port.pcie.pfid << 12; + header->cn10k.func |= conf->src_port.pcie.vfid; + } break; case RTE_DMA_DIR_MEM_TO_DEV: header->cn10k.xtype = DPI_XTYPE_OUTBOUND; header->cn10k.lport = 0; header->cn10k.fport = conf->dst_port.pcie.coreid; - header->cn10k.pvfe = 1; + header->cn10k.pvfe = conf->dst_port.pcie.vfen; + if (header->cn10k.pvfe) { + header->cn10k.func = conf->dst_port.pcie.pfid << 12; + header->cn10k.func |= conf->dst_port.pcie.vfid; + } break; case RTE_DMA_DIR_MEM_TO_MEM: header->cn10k.xtype = DPI_XTYPE_INTERNAL_ONLY; @@ -150,472 +191,312 @@ cn10k_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, header->cn10k.xtype = DPI_XTYPE_EXTERNAL_ONLY; header->cn10k.lport = conf->src_port.pcie.coreid; header->cn10k.fport = conf->dst_port.pcie.coreid; + header->cn10k.pvfe = 0; }; - - for (i = 0; i < conf->nb_desc; i++) { - comp_data = rte_zmalloc(NULL, sizeof(*comp_data), 0); - if (comp_data == NULL) { - plt_err("Failed to allocate for comp_data"); - return -ENOMEM; - } - comp_data->cdata = DPI_REQ_CDATA; - dpivf->conf.c_desc.compl_ptr[i] = comp_data; - }; - dpivf->conf.c_desc.max_cnt = DPI_MAX_DESC; - dpivf->conf.c_desc.head = 0; - dpivf->conf.c_desc.tail = 0; - - return 0; -} - -static int -cnxk_dmadev_start(struct rte_dma_dev *dev) -{ - struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private; - - dpivf->desc_idx = 0; - dpivf->num_words = 0; - roc_dpi_enable(&dpivf->rdpi); - - return 0; } static int -cnxk_dmadev_stop(struct rte_dma_dev *dev) +cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, + const struct rte_dma_vchan_conf *conf, uint32_t conf_sz) { struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private; + struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan]; + union cnxk_dpi_instr_cmd *header; + uint16_t max_desc; + uint32_t size; + int i; - roc_dpi_disable(&dpivf->rdpi); + RTE_SET_USED(conf_sz); - return 0; -} + header = (union cnxk_dpi_instr_cmd *)&dpi_conf->cmd.u; -static int -cnxk_dmadev_close(struct rte_dma_dev *dev) -{ - struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private; + if (dpivf->is_cn10k) + cn10k_dmadev_setup_hdr(header, conf); + else + cn9k_dmadev_setup_hdr(header, conf); - roc_dpi_disable(&dpivf->rdpi); - roc_dpi_dev_fini(&dpivf->rdpi); + /* Free up descriptor memory before allocating. 
*/ + cnxk_dmadev_vchan_free(dpivf, vchan); - return 0; -} + max_desc = conf->nb_desc; + if (!rte_is_power_of_2(max_desc)) + max_desc = rte_align32pow2(max_desc); -static inline int -__dpi_queue_write(struct roc_dpi *dpi, uint64_t *cmds, int cmd_count) -{ - uint64_t *ptr = dpi->chunk_base; + if (max_desc > CNXK_DPI_MAX_DESC) + max_desc = CNXK_DPI_MAX_DESC; - if ((cmd_count < DPI_MIN_CMD_SIZE) || (cmd_count > DPI_MAX_CMD_SIZE) || - cmds == NULL) - return -EINVAL; + size = (max_desc * sizeof(struct cnxk_dpi_compl_s *)); + dpi_conf->c_desc.compl_ptr = rte_zmalloc(NULL, size, 0); - /* - * Normally there is plenty of room in the current buffer for the - * command - */ - if (dpi->chunk_head + cmd_count < dpi->pool_size_m1) { - ptr += dpi->chunk_head; - dpi->chunk_head += cmd_count; - while (cmd_count--) - *ptr++ = *cmds++; - } else { - int count; - uint64_t *new_buff = dpi->chunk_next; + if (dpi_conf->c_desc.compl_ptr == NULL) { + plt_err("Failed to allocate for comp_data"); + return -ENOMEM; + } - dpi->chunk_next = - (void *)roc_npa_aura_op_alloc(dpi->aura_handle, 0); - if (!dpi->chunk_next) { - plt_err("Failed to alloc next buffer from NPA"); + for (i = 0; i < max_desc; i++) { + dpi_conf->c_desc.compl_ptr[i] = + rte_zmalloc(NULL, sizeof(struct cnxk_dpi_compl_s), 0); + if (!dpi_conf->c_desc.compl_ptr[i]) { + plt_err("Failed to allocate for descriptor memory"); return -ENOMEM; } - /* - * Figure out how many cmd words will fit in this buffer. - * One location will be needed for the next buffer pointer. - */ - count = dpi->pool_size_m1 - dpi->chunk_head; - ptr += dpi->chunk_head; - cmd_count -= count; - while (count--) - *ptr++ = *cmds++; - - /* - * chunk next ptr is 2 DWORDS - * second DWORD is reserved. - */ - *ptr++ = (uint64_t)new_buff; - *ptr = 0; - - /* - * The current buffer is full and has a link to the next - * buffers. Time to write the rest of the commands into the new - * buffer. - */ - dpi->chunk_base = new_buff; - dpi->chunk_head = cmd_count; - ptr = new_buff; - while (cmd_count--) - *ptr++ = *cmds++; - - /* queue index may be greater than pool size */ - if (dpi->chunk_head >= dpi->pool_size_m1) { - new_buff = dpi->chunk_next; - dpi->chunk_next = - (void *)roc_npa_aura_op_alloc(dpi->aura_handle, - 0); - if (!dpi->chunk_next) { - plt_err("Failed to alloc next buffer from NPA"); - return -ENOMEM; - } - /* Write next buffer address */ - *ptr = (uint64_t)new_buff; - dpi->chunk_base = new_buff; - dpi->chunk_head = 0; - } + dpi_conf->c_desc.compl_ptr[i]->cdata = CNXK_DPI_REQ_CDATA; } + dpi_conf->c_desc.max_cnt = (max_desc - 1); + return 0; } static int -cnxk_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, - rte_iova_t dst, uint32_t length, uint64_t flags) +cnxk_dmadev_start(struct rte_dma_dev *dev) { - struct cnxk_dpi_vf_s *dpivf = dev_private; - union dpi_instr_hdr_s *header = &dpivf->conf.hdr; - struct cnxk_dpi_compl_s *comp_ptr; - rte_iova_t fptr, lptr; - int num_words = 0; - int rc; - - RTE_SET_USED(vchan); - - comp_ptr = dpivf->conf.c_desc.compl_ptr[dpivf->conf.c_desc.tail]; - comp_ptr->cdata = DPI_REQ_CDATA; - header->cn9k.ptr = (uint64_t)comp_ptr; - STRM_INC(dpivf->conf.c_desc); - - header->cn9k.nfst = 1; - header->cn9k.nlst = 1; - - /* - * For inbound case, src pointers are last pointers. - * For all other cases, src pointers are first pointers. 
- */ - if (header->cn9k.xtype == DPI_XTYPE_INBOUND) { - fptr = dst; - lptr = src; - } else { - fptr = src; - lptr = dst; - } - - dpivf->cmd[0] = header->u[0]; - dpivf->cmd[1] = header->u[1]; - dpivf->cmd[2] = header->u[2]; - /* word3 is always 0 */ - num_words += 4; - dpivf->cmd[num_words++] = length; - dpivf->cmd[num_words++] = fptr; - dpivf->cmd[num_words++] = length; - dpivf->cmd[num_words++] = lptr; - - rc = __dpi_queue_write(&dpivf->rdpi, dpivf->cmd, num_words); - if (!rc) { - if (flags & RTE_DMA_OP_FLAG_SUBMIT) { - rte_wmb(); - plt_write64(num_words, - dpivf->rdpi.rbase + DPI_VDMA_DBELL); - dpivf->stats.submitted++; + struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private; + struct cnxk_dpi_conf *dpi_conf; + uint32_t chunks, nb_desc = 0; + int i, j, rc = 0; + void *chunk; + + for (i = 0; i < dpivf->num_vchans; i++) { + dpi_conf = &dpivf->conf[i]; + dpi_conf->c_desc.head = 0; + dpi_conf->c_desc.tail = 0; + dpi_conf->pnum_words = 0; + dpi_conf->pending = 0; + dpi_conf->desc_idx = 0; + for (j = 0; j < dpi_conf->c_desc.max_cnt + 1; j++) { + if (dpi_conf->c_desc.compl_ptr[j]) + dpi_conf->c_desc.compl_ptr[j]->cdata = CNXK_DPI_REQ_CDATA; } - dpivf->num_words += num_words; + nb_desc += dpi_conf->c_desc.max_cnt + 1; + cnxk_stats_reset(dev, i); + dpi_conf->completed_offset = 0; } - return dpivf->desc_idx++; -} - -static int -cnxk_dmadev_copy_sg(void *dev_private, uint16_t vchan, - const struct rte_dma_sge *src, - const struct rte_dma_sge *dst, - uint16_t nb_src, uint16_t nb_dst, uint64_t flags) -{ - struct cnxk_dpi_vf_s *dpivf = dev_private; - union dpi_instr_hdr_s *header = &dpivf->conf.hdr; - const struct rte_dma_sge *fptr, *lptr; - struct cnxk_dpi_compl_s *comp_ptr; - int num_words = 0; - int i, rc; - - RTE_SET_USED(vchan); - - comp_ptr = dpivf->conf.c_desc.compl_ptr[dpivf->conf.c_desc.tail]; - comp_ptr->cdata = DPI_REQ_CDATA; - header->cn9k.ptr = (uint64_t)comp_ptr; - STRM_INC(dpivf->conf.c_desc); - - /* - * For inbound case, src pointers are last pointers. - * For all other cases, src pointers are first pointers. 
- */ - if (header->cn9k.xtype == DPI_XTYPE_INBOUND) { - header->cn9k.nfst = nb_dst & 0xf; - header->cn9k.nlst = nb_src & 0xf; - fptr = &dst[0]; - lptr = &src[0]; - } else { - header->cn9k.nfst = nb_src & 0xf; - header->cn9k.nlst = nb_dst & 0xf; - fptr = &src[0]; - lptr = &dst[0]; + chunks = CNXK_DPI_CHUNKS_FROM_DESC(CNXK_DPI_QUEUE_BUF_SIZE, nb_desc); + rc = cnxk_dmadev_chunk_pool_create(dev, chunks, CNXK_DPI_QUEUE_BUF_SIZE); + if (rc < 0) { + plt_err("DMA pool configure failed err = %d", rc); + goto done; } - dpivf->cmd[0] = header->u[0]; - dpivf->cmd[1] = header->u[1]; - dpivf->cmd[2] = header->u[2]; - num_words += 4; - for (i = 0; i < header->cn9k.nfst; i++) { - dpivf->cmd[num_words++] = (uint64_t)fptr->length; - dpivf->cmd[num_words++] = fptr->addr; - fptr++; + rc = rte_mempool_get(dpivf->chunk_pool, &chunk); + if (rc < 0) { + plt_err("DMA failed to get chunk pointer err = %d", rc); + rte_mempool_free(dpivf->chunk_pool); + goto done; } - for (i = 0; i < header->cn9k.nlst; i++) { - dpivf->cmd[num_words++] = (uint64_t)lptr->length; - dpivf->cmd[num_words++] = lptr->addr; - lptr++; + rc = roc_dpi_configure(&dpivf->rdpi, CNXK_DPI_QUEUE_BUF_SIZE, dpivf->aura, (uint64_t)chunk); + if (rc < 0) { + plt_err("DMA configure failed err = %d", rc); + rte_mempool_free(dpivf->chunk_pool); + goto done; } - rc = __dpi_queue_write(&dpivf->rdpi, dpivf->cmd, num_words); - if (!rc) { - if (flags & RTE_DMA_OP_FLAG_SUBMIT) { - rte_wmb(); - plt_write64(num_words, - dpivf->rdpi.rbase + DPI_VDMA_DBELL); - dpivf->stats.submitted += nb_src; - } - dpivf->num_words += num_words; - } + dpivf->chunk_base = chunk; + dpivf->chunk_head = 0; + dpivf->chunk_size_m1 = (CNXK_DPI_QUEUE_BUF_SIZE >> 3) - 2; - return (rc < 0) ? rc : dpivf->desc_idx++; + roc_dpi_enable(&dpivf->rdpi); + +done: + return rc; } static int -cn10k_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, - rte_iova_t dst, uint32_t length, uint64_t flags) +cnxk_dmadev_stop(struct rte_dma_dev *dev) { - struct cnxk_dpi_vf_s *dpivf = dev_private; - union dpi_instr_hdr_s *header = &dpivf->conf.hdr; - struct cnxk_dpi_compl_s *comp_ptr; - rte_iova_t fptr, lptr; - int num_words = 0; - int rc; + struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private; + uint64_t reg; - RTE_SET_USED(vchan); - - comp_ptr = dpivf->conf.c_desc.compl_ptr[dpivf->conf.c_desc.tail]; - comp_ptr->cdata = DPI_REQ_CDATA; - header->cn10k.ptr = (uint64_t)comp_ptr; - STRM_INC(dpivf->conf.c_desc); - - header->cn10k.nfst = 1; - header->cn10k.nlst = 1; - - fptr = src; - lptr = dst; - - dpivf->cmd[0] = header->u[0]; - dpivf->cmd[1] = header->u[1]; - dpivf->cmd[2] = header->u[2]; - /* word3 is always 0 */ - num_words += 4; - dpivf->cmd[num_words++] = length; - dpivf->cmd[num_words++] = fptr; - dpivf->cmd[num_words++] = length; - dpivf->cmd[num_words++] = lptr; - - rc = __dpi_queue_write(&dpivf->rdpi, dpivf->cmd, num_words); - if (!rc) { - if (flags & RTE_DMA_OP_FLAG_SUBMIT) { - rte_wmb(); - plt_write64(num_words, - dpivf->rdpi.rbase + DPI_VDMA_DBELL); - dpivf->stats.submitted++; - } - dpivf->num_words += num_words; - } + reg = plt_read64(dpivf->rdpi.rbase + DPI_VDMA_SADDR); + while (!(reg & BIT_ULL(63))) + reg = plt_read64(dpivf->rdpi.rbase + DPI_VDMA_SADDR); + + roc_dpi_disable(&dpivf->rdpi); + rte_mempool_free(dpivf->chunk_pool); + dpivf->chunk_pool = NULL; + dpivf->chunk_base = NULL; + dpivf->chunk_size_m1 = 0; - return dpivf->desc_idx++; + return 0; } static int -cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, - const struct rte_dma_sge *src, - const struct rte_dma_sge *dst, 
uint16_t nb_src, - uint16_t nb_dst, uint64_t flags) +cnxk_dmadev_close(struct rte_dma_dev *dev) { - struct cnxk_dpi_vf_s *dpivf = dev_private; - union dpi_instr_hdr_s *header = &dpivf->conf.hdr; - const struct rte_dma_sge *fptr, *lptr; - struct cnxk_dpi_compl_s *comp_ptr; - int num_words = 0; - int i, rc; - - RTE_SET_USED(vchan); - - comp_ptr = dpivf->conf.c_desc.compl_ptr[dpivf->conf.c_desc.tail]; - comp_ptr->cdata = DPI_REQ_CDATA; - header->cn10k.ptr = (uint64_t)comp_ptr; - STRM_INC(dpivf->conf.c_desc); - - header->cn10k.nfst = nb_src & 0xf; - header->cn10k.nlst = nb_dst & 0xf; - fptr = &src[0]; - lptr = &dst[0]; - - dpivf->cmd[0] = header->u[0]; - dpivf->cmd[1] = header->u[1]; - dpivf->cmd[2] = header->u[2]; - num_words += 4; - - for (i = 0; i < header->cn10k.nfst; i++) { - dpivf->cmd[num_words++] = (uint64_t)fptr->length; - dpivf->cmd[num_words++] = fptr->addr; - fptr++; - } + struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private; - for (i = 0; i < header->cn10k.nlst; i++) { - dpivf->cmd[num_words++] = (uint64_t)lptr->length; - dpivf->cmd[num_words++] = lptr->addr; - lptr++; - } + roc_dpi_disable(&dpivf->rdpi); + cnxk_dmadev_vchan_free(dpivf, RTE_DMA_ALL_VCHAN); + roc_dpi_dev_fini(&dpivf->rdpi); - rc = __dpi_queue_write(&dpivf->rdpi, dpivf->cmd, num_words); - if (!rc) { - if (flags & RTE_DMA_OP_FLAG_SUBMIT) { - rte_wmb(); - plt_write64(num_words, - dpivf->rdpi.rbase + DPI_VDMA_DBELL); - dpivf->stats.submitted += nb_src; - } - dpivf->num_words += num_words; - } + /* Clear all flags as we close the device. */ + dpivf->flag = 0; - return (rc < 0) ? rc : dpivf->desc_idx++; + return 0; } static uint16_t -cnxk_dmadev_completed(void *dev_private, uint16_t vchan, const uint16_t nb_cpls, - uint16_t *last_idx, bool *has_error) +cnxk_dmadev_completed(void *dev_private, uint16_t vchan, const uint16_t nb_cpls, uint16_t *last_idx, + bool *has_error) { struct cnxk_dpi_vf_s *dpivf = dev_private; + struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan]; + struct cnxk_dpi_cdesc_data_s *c_desc = &dpi_conf->c_desc; + struct cnxk_dpi_compl_s *comp_ptr; int cnt; - RTE_SET_USED(vchan); - - if (dpivf->stats.submitted == dpivf->stats.completed) - return 0; - for (cnt = 0; cnt < nb_cpls; cnt++) { - struct cnxk_dpi_compl_s *comp_ptr = - dpivf->conf.c_desc.compl_ptr[cnt]; + comp_ptr = c_desc->compl_ptr[c_desc->head]; if (comp_ptr->cdata) { - if (comp_ptr->cdata == DPI_REQ_CDATA) + if (comp_ptr->cdata == CNXK_DPI_REQ_CDATA) break; *has_error = 1; - dpivf->stats.errors++; + dpi_conf->stats.errors++; + CNXK_DPI_STRM_INC(*c_desc, head); break; } + + comp_ptr->cdata = CNXK_DPI_REQ_CDATA; + CNXK_DPI_STRM_INC(*c_desc, head); } - *last_idx = cnt - 1; - dpivf->conf.c_desc.tail = cnt; - dpivf->stats.completed += cnt; + dpi_conf->stats.completed += cnt; + *last_idx = (dpi_conf->completed_offset + dpi_conf->stats.completed - 1) & 0xffff; return cnt; } static uint16_t -cnxk_dmadev_completed_status(void *dev_private, uint16_t vchan, - const uint16_t nb_cpls, uint16_t *last_idx, - enum rte_dma_status_code *status) +cnxk_dmadev_completed_status(void *dev_private, uint16_t vchan, const uint16_t nb_cpls, + uint16_t *last_idx, enum rte_dma_status_code *status) { struct cnxk_dpi_vf_s *dpivf = dev_private; + struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan]; + struct cnxk_dpi_cdesc_data_s *c_desc = &dpi_conf->c_desc; + struct cnxk_dpi_compl_s *comp_ptr; int cnt; - RTE_SET_USED(vchan); - RTE_SET_USED(last_idx); for (cnt = 0; cnt < nb_cpls; cnt++) { - struct cnxk_dpi_compl_s *comp_ptr = - dpivf->conf.c_desc.compl_ptr[cnt]; + comp_ptr 
= c_desc->compl_ptr[c_desc->head]; status[cnt] = comp_ptr->cdata; if (status[cnt]) { - if (status[cnt] == DPI_REQ_CDATA) + if (status[cnt] == CNXK_DPI_REQ_CDATA) break; - dpivf->stats.errors++; + dpi_conf->stats.errors++; } + comp_ptr->cdata = CNXK_DPI_REQ_CDATA; + CNXK_DPI_STRM_INC(*c_desc, head); } - *last_idx = cnt - 1; - dpivf->conf.c_desc.tail = 0; - dpivf->stats.completed += cnt; + dpi_conf->stats.completed += cnt; + *last_idx = (dpi_conf->completed_offset + dpi_conf->stats.completed - 1) & 0xffff; return cnt; } +static uint16_t +cnxk_damdev_burst_capacity(const void *dev_private, uint16_t vchan) +{ + const struct cnxk_dpi_vf_s *dpivf = (const struct cnxk_dpi_vf_s *)dev_private; + const struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan]; + uint16_t burst_cap; + + burst_cap = dpi_conf->c_desc.max_cnt - + ((dpi_conf->stats.submitted - dpi_conf->stats.completed) + dpi_conf->pending) + + 1; + + return burst_cap; +} + static int -cnxk_dmadev_submit(void *dev_private, uint16_t vchan __rte_unused) +cnxk_dmadev_submit(void *dev_private, uint16_t vchan) { struct cnxk_dpi_vf_s *dpivf = dev_private; + struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan]; + uint32_t num_words = dpi_conf->pnum_words; + + if (!dpi_conf->pnum_words) + return 0; rte_wmb(); - plt_write64(dpivf->num_words, dpivf->rdpi.rbase + DPI_VDMA_DBELL); - dpivf->stats.submitted++; + plt_write64(num_words, dpivf->rdpi.rbase + DPI_VDMA_DBELL); + + dpi_conf->stats.submitted += dpi_conf->pending; + dpi_conf->pnum_words = 0; + dpi_conf->pending = 0; return 0; } static int -cnxk_stats_get(const struct rte_dma_dev *dev, uint16_t vchan, - struct rte_dma_stats *rte_stats, uint32_t size) +cnxk_stats_get(const struct rte_dma_dev *dev, uint16_t vchan, struct rte_dma_stats *rte_stats, + uint32_t size) { struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private; - struct rte_dma_stats *stats = &dpivf->stats; - - RTE_SET_USED(vchan); + struct cnxk_dpi_conf *dpi_conf; + int i; if (size < sizeof(rte_stats)) return -EINVAL; if (rte_stats == NULL) return -EINVAL; - *rte_stats = *stats; + /* Stats of all vchans requested. */ + if (vchan == RTE_DMA_ALL_VCHAN) { + for (i = 0; i < dpivf->num_vchans; i++) { + dpi_conf = &dpivf->conf[i]; + rte_stats->submitted += dpi_conf->stats.submitted; + rte_stats->completed += dpi_conf->stats.completed; + rte_stats->errors += dpi_conf->stats.errors; + } + + goto done; + } + + if (vchan >= CNXK_DPI_MAX_VCHANS_PER_QUEUE) + return -EINVAL; + + dpi_conf = &dpivf->conf[vchan]; + *rte_stats = dpi_conf->stats; + +done: return 0; } static int -cnxk_stats_reset(struct rte_dma_dev *dev, uint16_t vchan __rte_unused) +cnxk_stats_reset(struct rte_dma_dev *dev, uint16_t vchan) { struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private; + struct cnxk_dpi_conf *dpi_conf; + int i; + + /* clear stats of all vchans. 
*/ + if (vchan == RTE_DMA_ALL_VCHAN) { + for (i = 0; i < dpivf->num_vchans; i++) { + dpi_conf = &dpivf->conf[i]; + dpi_conf->completed_offset += dpi_conf->stats.completed; + dpi_conf->stats = (struct rte_dma_stats){0}; + } + + return 0; + } + + if (vchan >= CNXK_DPI_MAX_VCHANS_PER_QUEUE) + return -EINVAL; + + dpi_conf = &dpivf->conf[vchan]; + dpi_conf->completed_offset += dpi_conf->stats.completed; + dpi_conf->stats = (struct rte_dma_stats){0}; - dpivf->stats = (struct rte_dma_stats){0}; return 0; } -static const struct rte_dma_dev_ops cn10k_dmadev_ops = { - .dev_close = cnxk_dmadev_close, - .dev_configure = cnxk_dmadev_configure, - .dev_info_get = cnxk_dmadev_info_get, - .dev_start = cnxk_dmadev_start, - .dev_stop = cnxk_dmadev_stop, - .stats_get = cnxk_stats_get, - .stats_reset = cnxk_stats_reset, - .vchan_setup = cn10k_dmadev_vchan_setup, -}; - static const struct rte_dma_dev_ops cnxk_dmadev_ops = { .dev_close = cnxk_dmadev_close, .dev_configure = cnxk_dmadev_configure, @@ -628,8 +509,7 @@ static const struct rte_dma_dev_ops cnxk_dmadev_ops = { }; static int -cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, - struct rte_pci_device *pci_dev) +cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { struct cnxk_dpi_vf_s *dpivf = NULL; char name[RTE_DEV_NAME_MAX_LEN]; @@ -648,8 +528,7 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, memset(name, 0, sizeof(name)); rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); - dmadev = rte_dma_pmd_allocate(name, pci_dev->device.numa_node, - sizeof(*dpivf)); + dmadev = rte_dma_pmd_allocate(name, pci_dev->device.numa_node, sizeof(*dpivf)); if (dmadev == NULL) { plt_err("dma device allocation failed for %s", name); return -ENOMEM; @@ -666,11 +545,10 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, dmadev->fp_obj->submit = cnxk_dmadev_submit; dmadev->fp_obj->completed = cnxk_dmadev_completed; dmadev->fp_obj->completed_status = cnxk_dmadev_completed_status; + dmadev->fp_obj->burst_capacity = cnxk_damdev_burst_capacity; - if (pci_dev->id.subsystem_device_id == PCI_SUBSYSTEM_DEVID_CN10KA || - pci_dev->id.subsystem_device_id == PCI_SUBSYSTEM_DEVID_CNF10KA || - pci_dev->id.subsystem_device_id == PCI_SUBSYSTEM_DEVID_CN10KB) { - dmadev->dev_ops = &cn10k_dmadev_ops; + if (roc_model_is_cn10k()) { + dpivf->is_cn10k = true; dmadev->fp_obj->copy = cn10k_dmadev_copy; dmadev->fp_obj->copy_sg = cn10k_dmadev_copy_sg; } @@ -682,6 +560,8 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, if (rc < 0) goto err_out_free; + dmadev->state = RTE_DMA_DEV_READY; + return 0; err_out_free: @@ -703,20 +583,17 @@ cnxk_dmadev_remove(struct rte_pci_device *pci_dev) } static const struct rte_pci_id cnxk_dma_pci_map[] = { - { - RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, - PCI_DEVID_CNXK_DPI_VF) - }, + {RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_DPI_VF)}, { .vendor_id = 0, }, }; static struct rte_pci_driver cnxk_dmadev = { - .id_table = cnxk_dma_pci_map, + .id_table = cnxk_dma_pci_map, .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA, - .probe = cnxk_dmadev_probe, - .remove = cnxk_dmadev_remove, + .probe = cnxk_dmadev_probe, + .remove = cnxk_dmadev_remove, }; RTE_PMD_REGISTER_PCI(cnxk_dmadev_pci_driver, cnxk_dmadev); diff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h index e1f5694f507..350ae73b5cd 100644 --- a/drivers/dma/cnxk/cnxk_dmadev.h +++ b/drivers/dma/cnxk/cnxk_dmadev.h @@ -4,16 +4,84 @@ #ifndef CNXK_DMADEV_H #define 
CNXK_DMADEV_H -#define DPI_MAX_POINTER 15 -#define DPI_QUEUE_STOP 0x0 -#define DPI_QUEUE_START 0x1 -#define STRM_INC(s) ((s).tail = ((s).tail + 1) % (s).max_cnt) -#define DPI_MAX_DESC 1024 +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define CNXK_DPI_MAX_POINTER 15 +#define CNXK_DPI_STRM_INC(s, var) ((s).var = ((s).var + 1) & (s).max_cnt) +#define CNXK_DPI_STRM_DEC(s, var) ((s).var = ((s).var - 1) == -1 ? (s).max_cnt : \ + ((s).var - 1)) +#define CNXK_DPI_MAX_DESC 32768 +#define CNXK_DPI_MIN_DESC 2 +#define CNXK_DPI_MAX_VCHANS_PER_QUEUE 4 +#define CNXK_DPI_QUEUE_BUF_SIZE 16256 +#define CNXK_DPI_POOL_MAX_CACHE_SZ (16) +#define CNXK_DPI_DW_PER_SINGLE_CMD 8 +#define CNXK_DPI_HDR_LEN 4 +#define CNXK_DPI_CMD_LEN(src, dst) (CNXK_DPI_HDR_LEN + ((src) << 1) + ((dst) << 1)) +#define CNXK_DPI_MAX_CMD_SZ CNXK_DPI_CMD_LEN(CNXK_DPI_MAX_POINTER, \ + CNXK_DPI_MAX_POINTER) +#define CNXK_DPI_CHUNKS_FROM_DESC(cz, desc) (((desc) / (((cz) / 8) / CNXK_DPI_MAX_CMD_SZ)) + 1) /* Set Completion data to 0xFF when request submitted, * upon successful request completion engine reset to completion status */ -#define DPI_REQ_CDATA 0xFF +#define CNXK_DPI_REQ_CDATA 0xFF + +union cnxk_dpi_instr_cmd { + uint64_t u; + struct cn9k_dpi_instr_cmd { + uint64_t aura : 20; + uint64_t func : 16; + uint64_t pt : 2; + uint64_t reserved_102 : 1; + uint64_t pvfe : 1; + uint64_t fl : 1; + uint64_t ii : 1; + uint64_t fi : 1; + uint64_t ca : 1; + uint64_t csel : 1; + uint64_t reserved_109_111 : 3; + uint64_t xtype : 2; + uint64_t reserved_114_119 : 6; + uint64_t fport : 2; + uint64_t reserved_122_123 : 2; + uint64_t lport : 2; + uint64_t reserved_126_127 : 2; + /* Word 1 - End */ + } cn9k; + + struct cn10k_dpi_instr_cmd { + uint64_t nfst : 4; + uint64_t reserved_4_5 : 2; + uint64_t nlst : 4; + uint64_t reserved_10_11 : 2; + uint64_t pvfe : 1; + uint64_t reserved_13 : 1; + uint64_t func : 16; + uint64_t aura : 20; + uint64_t xtype : 2; + uint64_t reserved_52_53 : 2; + uint64_t pt : 2; + uint64_t fport : 2; + uint64_t reserved_58_59 : 2; + uint64_t lport : 2; + uint64_t reserved_62_63 : 2; + /* Word 0 - End */ + } cn10k; +}; struct cnxk_dpi_compl_s { uint64_t cdata; @@ -21,24 +89,46 @@ struct cnxk_dpi_compl_s { }; struct cnxk_dpi_cdesc_data_s { - struct cnxk_dpi_compl_s *compl_ptr[DPI_MAX_DESC]; + struct cnxk_dpi_compl_s **compl_ptr; uint16_t max_cnt; uint16_t head; uint16_t tail; }; struct cnxk_dpi_conf { - union dpi_instr_hdr_s hdr; + union cnxk_dpi_instr_cmd cmd; struct cnxk_dpi_cdesc_data_s c_desc; + uint16_t pnum_words; + uint16_t pending; + uint16_t desc_idx; + struct rte_dma_stats stats; + uint64_t completed_offset; }; struct cnxk_dpi_vf_s { + /* Fast path */ + uint64_t *chunk_base; + uint16_t chunk_head; + uint16_t chunk_size_m1; + struct rte_mempool *chunk_pool; + struct cnxk_dpi_conf conf[CNXK_DPI_MAX_VCHANS_PER_QUEUE]; + /* Slow path */ struct roc_dpi rdpi; - struct cnxk_dpi_conf conf; - struct rte_dma_stats stats; - uint64_t cmd[DPI_MAX_CMD_SIZE]; - uint32_t num_words; - uint16_t desc_idx; -}; + uint32_t aura; + uint16_t num_vchans; + uint16_t flag; + uint8_t is_cn10k; +} __plt_cache_aligned; + +int cnxk_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iova_t dst, + uint32_t length, uint64_t flags); +int cnxk_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge *src, + const struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst, + uint64_t flags); +int cn10k_dmadev_copy(void *dev_private, uint16_t vchan, 
rte_iova_t src, rte_iova_t dst, + uint32_t length, uint64_t flags); +int cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge *src, + const struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst, + uint64_t flags); #endif diff --git a/drivers/dma/cnxk/cnxk_dmadev_fp.c b/drivers/dma/cnxk/cnxk_dmadev_fp.c new file mode 100644 index 00000000000..16d7b5426bb --- /dev/null +++ b/drivers/dma/cnxk/cnxk_dmadev_fp.c @@ -0,0 +1,436 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2021 Marvell International Ltd. + */ + +#include + +#include "cnxk_dmadev.h" + +static __plt_always_inline void +__dpi_cpy_scalar(uint64_t *src, uint64_t *dst, uint8_t n) +{ + uint8_t i; + + for (i = 0; i < n; i++) + dst[i] = src[i]; +} + +#if defined(RTE_ARCH_ARM64) +static __plt_always_inline void +__dpi_cpy_vector(uint64_t *src, uint64_t *dst, uint8_t n) +{ + uint64x2_t vec; + uint8_t i; + + for (i = 0; i < n; i += 2) { + vec = vld1q_u64((const uint64_t *)&src[i]); + vst1q_u64(&dst[i], vec); + } +} + +static __plt_always_inline void +__dpi_cpy_vector_sg(const struct rte_dma_sge *src, uint64_t *dst, uint16_t n) +{ + uint64x2_t mask = {0xFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL}; + uint64x2_t vec; + uint8_t i; + + for (i = 0; i < n; i++) { + vec = vld1q_u64((const uint64_t *)&src[i]); + vec = vextq_u64(vec, vec, 1); + vec = vandq_u64(vec, mask); + vst1q_u64(dst, vec); + dst += 2; + } +} + +static __plt_always_inline uint8_t +__dpi_cpy_vector_sg_lmt(const struct rte_dma_sge *src, uint64_t *dst, uint16_t n, uint16_t lmt) +{ + uint64x2_t mask = {0xFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL}; + uint64x2_t vec; + uint8_t i; + + for (i = 0; i < n && lmt; i++) { + vec = vld1q_u64((const uint64_t *)&src[i]); + vec = vextq_u64(vec, vec, 1); + vec = vandq_u64(vec, mask); + vst1q_u64(dst, vec); + dst += 2; + lmt -= 2; + } + + return i; +} +#else +static __plt_always_inline void +__dpi_cpy_scalar_sg(const struct rte_dma_sge *src, uint64_t *dst, uint16_t n) +{ + uint8_t i; + + for (i = 0; i < n; i++) { + *dst++ = src[i].length; + *dst++ = src[i].addr; + } +} + +static __plt_always_inline uint8_t +__dpi_cpy_scalar_sg_lmt(const struct rte_dma_sge *src, uint64_t *dst, uint16_t n, uint16_t lmt) +{ + uint8_t i; + + for (i = 0; i < n && lmt; i++) { + *dst++ = src[i].length; + *dst++ = src[i].addr; + lmt -= 2; + } + + return i; +} +#endif + +static __plt_always_inline void +__dpi_cpy(uint64_t *src, uint64_t *dst, uint8_t n) +{ +#if defined(RTE_ARCH_ARM64) + __dpi_cpy_vector(src, dst, n); +#else + __dpi_cpy_scalar(src, dst, n); +#endif +} + +static __plt_always_inline void +__dpi_cpy_sg(const struct rte_dma_sge *src, uint64_t *dst, uint16_t n) +{ +#if defined(RTE_ARCH_ARM64) + __dpi_cpy_vector_sg(src, dst, n); +#else + __dpi_cpy_scalar_sg(src, dst, n); +#endif +} + +static __plt_always_inline uint8_t +__dpi_cpy_sg_lmt(const struct rte_dma_sge *src, uint64_t *dst, uint16_t n, uint16_t lmt) +{ +#if defined(RTE_ARCH_ARM64) + return __dpi_cpy_vector_sg_lmt(src, dst, n, lmt); +#else + return __dpi_cpy_scalar_sg_lmt(src, dst, n, lmt); +#endif +} + +static __plt_always_inline int +__dpi_queue_write_single(struct cnxk_dpi_vf_s *dpi, uint64_t *cmd) +{ + uint64_t *ptr = dpi->chunk_base; + + /* Check if command fits in the current chunk. 
*/ + if (dpi->chunk_head + CNXK_DPI_DW_PER_SINGLE_CMD < dpi->chunk_size_m1) { + ptr += dpi->chunk_head; + + __dpi_cpy_scalar(cmd, ptr, CNXK_DPI_DW_PER_SINGLE_CMD); + dpi->chunk_head += CNXK_DPI_DW_PER_SINGLE_CMD; + } else { + uint64_t *new_buff = NULL; + int count; + + if (rte_mempool_get(dpi->chunk_pool, (void **)&new_buff) < 0) { + plt_dpi_dbg("Failed to alloc next buffer from NPA"); + return -ENOSPC; + } + + /* + * Figure out how many cmd words will fit in the current chunk + * and copy them. + */ + count = dpi->chunk_size_m1 - dpi->chunk_head; + ptr += dpi->chunk_head; + + __dpi_cpy_scalar(cmd, ptr, count); + + ptr += count; + *ptr = (uint64_t)new_buff; + ptr = new_buff; + + /* Copy the remaining cmd words to new chunk. */ + __dpi_cpy_scalar(cmd + count, ptr, CNXK_DPI_DW_PER_SINGLE_CMD - count); + + dpi->chunk_base = new_buff; + dpi->chunk_head = CNXK_DPI_DW_PER_SINGLE_CMD - count; + } + + return 0; +} + +static __plt_always_inline int +__dpi_queue_write_sg(struct cnxk_dpi_vf_s *dpi, uint64_t *hdr, const struct rte_dma_sge *src, + const struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst) +{ + uint8_t cmd_len = CNXK_DPI_CMD_LEN(nb_src, nb_dst); + uint64_t *ptr = dpi->chunk_base; + + /* Check if command fits in the current chunk. */ + if (dpi->chunk_head + cmd_len < dpi->chunk_size_m1) { + ptr += dpi->chunk_head; + + __dpi_cpy(hdr, ptr, CNXK_DPI_HDR_LEN); + ptr += CNXK_DPI_HDR_LEN; + __dpi_cpy_sg(src, ptr, nb_src); + ptr += (nb_src << 1); + __dpi_cpy_sg(dst, ptr, nb_dst); + + dpi->chunk_head += cmd_len; + } else { + uint64_t *new_buff = NULL, *buf; + uint16_t count; + + if (rte_mempool_get(dpi->chunk_pool, (void **)&new_buff) < 0) { + plt_dpi_dbg("Failed to alloc next buffer from NPA"); + return -ENOSPC; + } + + /* + * Figure out how many cmd words will fit in the current chunk + * and copy them, copy the rest to the new buffer. + */ + count = dpi->chunk_size_m1 - dpi->chunk_head; + ptr += dpi->chunk_head; + buf = new_buff; + if (count <= 4) { + __dpi_cpy(hdr, ptr, count); + ptr += count; + __dpi_cpy(&hdr[count], buf, 4); + buf += (4 - count); + } else { + uint8_t i; + + __dpi_cpy(hdr, ptr, 4); + ptr += 4; + count -= 4; + + i = __dpi_cpy_sg_lmt(src, ptr, nb_src, count); + src += i; + nb_src -= i; + count -= (i << 1); + ptr += (i << 1); + + i = __dpi_cpy_sg_lmt(dst, ptr, nb_dst, count); + dst += i; + nb_dst -= i; + ptr += (i << 1); + } + *ptr = (uint64_t)new_buff; + + __dpi_cpy_sg(src, buf, nb_src); + buf += (nb_src << 1); + + __dpi_cpy_sg(dst, buf, nb_dst); + buf += (nb_dst << 1); + + dpi->chunk_base = new_buff; + dpi->chunk_head = buf - new_buff; + } + + return 0; +} + +int +cnxk_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iova_t dst, uint32_t length, + uint64_t flags) +{ + struct cnxk_dpi_vf_s *dpivf = dev_private; + struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan]; + uint64_t cmd[CNXK_DPI_DW_PER_SINGLE_CMD]; + struct cnxk_dpi_compl_s *comp_ptr; + int rc; + + if (unlikely(((dpi_conf->c_desc.tail + 1) & dpi_conf->c_desc.max_cnt) == + dpi_conf->c_desc.head)) + return -ENOSPC; + + comp_ptr = dpi_conf->c_desc.compl_ptr[dpi_conf->c_desc.tail]; + CNXK_DPI_STRM_INC(dpi_conf->c_desc, tail); + + cmd[0] = (1UL << 54) | (1UL << 48); + cmd[1] = dpi_conf->cmd.u; + cmd[2] = (uint64_t)comp_ptr; + cmd[4] = length; + cmd[6] = length; + + /* + * For inbound case, src pointers are last pointers. + * For all other cases, src pointers are first pointers. 
+ */ + if (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) { + cmd[5] = dst; + cmd[7] = src; + } else { + cmd[5] = src; + cmd[7] = dst; + } + + rc = __dpi_queue_write_single(dpivf, cmd); + if (unlikely(rc)) { + CNXK_DPI_STRM_DEC(dpi_conf->c_desc, tail); + return rc; + } + + if (flags & RTE_DMA_OP_FLAG_SUBMIT) { + rte_wmb(); + plt_write64(dpi_conf->pnum_words + CNXK_DPI_DW_PER_SINGLE_CMD, + dpivf->rdpi.rbase + DPI_VDMA_DBELL); + dpi_conf->stats.submitted += dpi_conf->pending + 1; + dpi_conf->pnum_words = 0; + dpi_conf->pending = 0; + } else { + dpi_conf->pnum_words += CNXK_DPI_DW_PER_SINGLE_CMD; + dpi_conf->pending++; + } + + return dpi_conf->desc_idx++; +} + +int +cnxk_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge *src, + const struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst, uint64_t flags) +{ + struct cnxk_dpi_vf_s *dpivf = dev_private; + struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan]; + const struct rte_dma_sge *fptr, *lptr; + struct cnxk_dpi_compl_s *comp_ptr; + uint64_t hdr[4]; + int rc; + + if (unlikely(((dpi_conf->c_desc.tail + 1) & dpi_conf->c_desc.max_cnt) == + dpi_conf->c_desc.head)) + return -ENOSPC; + + comp_ptr = dpi_conf->c_desc.compl_ptr[dpi_conf->c_desc.tail]; + CNXK_DPI_STRM_INC(dpi_conf->c_desc, tail); + + hdr[1] = dpi_conf->cmd.u; + hdr[2] = (uint64_t)comp_ptr; + + /* + * For inbound case, src pointers are last pointers. + * For all other cases, src pointers are first pointers. + */ + if (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) { + fptr = dst; + lptr = src; + RTE_SWAP(nb_src, nb_dst); + } else { + fptr = src; + lptr = dst; + } + hdr[0] = ((uint64_t)nb_dst << 54) | (uint64_t)nb_src << 48; + + rc = __dpi_queue_write_sg(dpivf, hdr, fptr, lptr, nb_src, nb_dst); + if (unlikely(rc)) { + CNXK_DPI_STRM_DEC(dpi_conf->c_desc, tail); + return rc; + } + + if (flags & RTE_DMA_OP_FLAG_SUBMIT) { + rte_wmb(); + plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst), + dpivf->rdpi.rbase + DPI_VDMA_DBELL); + dpi_conf->stats.submitted += dpi_conf->pending + 1; + dpi_conf->pnum_words = 0; + dpi_conf->pending = 0; + } else { + dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst); + dpi_conf->pending++; + } + + return dpi_conf->desc_idx++; +} + +int +cn10k_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iova_t dst, + uint32_t length, uint64_t flags) +{ + struct cnxk_dpi_vf_s *dpivf = dev_private; + struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan]; + uint64_t cmd[CNXK_DPI_DW_PER_SINGLE_CMD]; + struct cnxk_dpi_compl_s *comp_ptr; + int rc; + + if (unlikely(((dpi_conf->c_desc.tail + 1) & dpi_conf->c_desc.max_cnt) == + dpi_conf->c_desc.head)) + return -ENOSPC; + + comp_ptr = dpi_conf->c_desc.compl_ptr[dpi_conf->c_desc.tail]; + CNXK_DPI_STRM_INC(dpi_conf->c_desc, tail); + + cmd[0] = dpi_conf->cmd.u | (1U << 6) | 1U; + cmd[1] = (uint64_t)comp_ptr; + cmd[2] = 0; + cmd[4] = length; + cmd[5] = src; + cmd[6] = length; + cmd[7] = dst; + + rc = __dpi_queue_write_single(dpivf, cmd); + if (unlikely(rc)) { + CNXK_DPI_STRM_DEC(dpi_conf->c_desc, tail); + return rc; + } + + if (flags & RTE_DMA_OP_FLAG_SUBMIT) { + rte_wmb(); + plt_write64(dpi_conf->pnum_words + CNXK_DPI_DW_PER_SINGLE_CMD, + dpivf->rdpi.rbase + DPI_VDMA_DBELL); + dpi_conf->stats.submitted += dpi_conf->pending + 1; + dpi_conf->pnum_words = 0; + dpi_conf->pending = 0; + } else { + dpi_conf->pnum_words += 8; + dpi_conf->pending++; + } + + return dpi_conf->desc_idx++; +} + +int +cn10k_dmadev_copy_sg(void *dev_private, 
uint16_t vchan, const struct rte_dma_sge *src, + const struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst, + uint64_t flags) +{ + struct cnxk_dpi_vf_s *dpivf = dev_private; + struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan]; + struct cnxk_dpi_compl_s *comp_ptr; + uint64_t hdr[4]; + int rc; + + if (unlikely(((dpi_conf->c_desc.tail + 1) & dpi_conf->c_desc.max_cnt) == + dpi_conf->c_desc.head)) + return -ENOSPC; + + comp_ptr = dpi_conf->c_desc.compl_ptr[dpi_conf->c_desc.tail]; + CNXK_DPI_STRM_INC(dpi_conf->c_desc, tail); + + hdr[0] = dpi_conf->cmd.u | (nb_dst << 6) | nb_src; + hdr[1] = (uint64_t)comp_ptr; + hdr[2] = 0; + + rc = __dpi_queue_write_sg(dpivf, hdr, src, dst, nb_src, nb_dst); + if (unlikely(rc)) { + CNXK_DPI_STRM_DEC(dpi_conf->c_desc, tail); + return rc; + } + + if (flags & RTE_DMA_OP_FLAG_SUBMIT) { + rte_wmb(); + plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst), + dpivf->rdpi.rbase + DPI_VDMA_DBELL); + dpi_conf->stats.submitted += dpi_conf->pending + 1; + dpi_conf->pnum_words = 0; + dpi_conf->pending = 0; + } else { + dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst); + dpi_conf->pending++; + } + + return dpi_conf->desc_idx++; +} diff --git a/drivers/dma/cnxk/meson.build b/drivers/dma/cnxk/meson.build index b868fb14cbe..e557349368d 100644 --- a/drivers/dma/cnxk/meson.build +++ b/drivers/dma/cnxk/meson.build @@ -1,6 +1,13 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(C) 2021 Marvell International Ltd. +error_cflags = ['-Wno-uninitialized'] +foreach flag: error_cflags + if cc.has_argument(flag) + cflags += flag + endif +endforeach + deps += ['bus_pci', 'common_cnxk', 'dmadev'] -sources = files('cnxk_dmadev.c') +sources = files('cnxk_dmadev.c', 'cnxk_dmadev_fp.c') require_iova_in_mbuf = false diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c index 3696c7f452f..a78889a7efa 100644 --- a/drivers/dma/idxd/idxd_pci.c +++ b/drivers/dma/idxd/idxd_pci.c @@ -196,6 +196,14 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd, pci->portals = dev->mem_resource[2].addr; pci->wq_cfg_sz = (pci->regs->wqcap >> 24) & 0x0F; + /* reset */ + idxd->u.pci = pci; + err_code = idxd_pci_dev_command(idxd, idxd_reset_device); + if (err_code) { + IDXD_PMD_ERR("Error reset device: code %#x", err_code); + goto err; + } + /* sanity check device status */ if (pci->regs->gensts & GENSTS_DEV_STATE_MASK) { /* need function-level-reset (FLR) or is enabled */ diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c index c2d776dbbd4..eab03852ddf 100644 --- a/drivers/dma/skeleton/skeleton_dmadev.c +++ b/drivers/dma/skeleton/skeleton_dmadev.c @@ -100,7 +100,7 @@ static int skeldma_start(struct rte_dma_dev *dev) { struct skeldma_hw *hw = dev->data->dev_private; - char name[RTE_MAX_THREAD_NAME_LEN]; + char name[RTE_THREAD_INTERNAL_NAME_SIZE]; rte_cpuset_t cpuset; int ret; @@ -127,9 +127,9 @@ skeldma_start(struct rte_dma_dev *dev) rte_mb(); - snprintf(name, sizeof(name), "dma_skel_%d", dev->data->dev_id); - ret = rte_thread_create_control(&hw->thread, name, NULL, - cpucopy_thread, dev); + snprintf(name, sizeof(name), "dma-skel%d", dev->data->dev_id); + ret = rte_thread_create_internal_control(&hw->thread, name, + cpucopy_thread, dev); if (ret) { SKELDMA_LOG(ERR, "Start cpucopy thread fail!"); return -EINVAL; @@ -137,7 +137,7 @@ skeldma_start(struct rte_dma_dev *dev) if (hw->lcore_id != -1) { cpuset = rte_lcore_cpuset(hw->lcore_id); - ret = rte_thread_get_affinity_by_id(hw->thread, &cpuset); + ret = 
rte_thread_set_affinity_by_id(hw->thread, &cpuset); if (ret) SKELDMA_LOG(WARNING, "Set thread affinity lcore = %d fail!", diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c index 499a3aace74..bb0c9105535 100644 --- a/drivers/event/cnxk/cn10k_eventdev.c +++ b/drivers/event/cnxk/cn10k_eventdev.c @@ -66,21 +66,21 @@ cn10k_sso_init_hws_mem(void *arg, uint8_t port_id) } static int -cn10k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link) +cn10k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link, uint8_t profile) { struct cnxk_sso_evdev *dev = arg; struct cn10k_sso_hws *ws = port; - return roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link); + return roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link, profile); } static int -cn10k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link) +cn10k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link, uint8_t profile) { struct cnxk_sso_evdev *dev = arg; struct cn10k_sso_hws *ws = port; - return roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link); + return roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link, profile); } static void @@ -107,10 +107,11 @@ cn10k_sso_hws_release(void *arg, void *hws) { struct cnxk_sso_evdev *dev = arg; struct cn10k_sso_hws *ws = hws; - uint16_t i; + uint16_t i, j; - for (i = 0; i < dev->nb_event_queues; i++) - roc_sso_hws_unlink(&dev->sso, ws->hws_id, &i, 1); + for (i = 0; i < CNXK_SSO_MAX_PROFILES; i++) + for (j = 0; j < dev->nb_event_queues; j++) + roc_sso_hws_unlink(&dev->sso, ws->hws_id, &j, 1, i); memset(ws, 0, sizeof(*ws)); } @@ -118,6 +119,7 @@ static int cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base, cnxk_handle_event_t fn, void *arg) { + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg); uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX; struct cn10k_sso_hws *ws = hws; uint64_t cq_ds_cnt = 1; @@ -128,6 +130,7 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base, plt_write64(0, base + SSO_LF_GGRP_QCTL); + roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1); plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL); req = queue_id; /* GGRP ID */ req |= BIT_ULL(18); /* Grouped */ @@ -162,6 +165,7 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base, return -EAGAIN; plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL); + roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1); rte_mb(); return 0; @@ -181,6 +185,7 @@ cn10k_sso_hws_reset(void *arg, void *hws) uint8_t pend_tt; bool is_pend; + roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1); plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL); /* Wait till getwork/swtp/waitw/desched completes. */ is_pend = false; @@ -200,12 +205,14 @@ cn10k_sso_hws_reset(void *arg, void *hws) cnxk_sso_hws_swtag_untag(base + SSOW_LF_GWS_OP_SWTAG_UNTAG); plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED); + } else if (pend_tt != SSO_TT_EMPTY) { + plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH); } /* Wait for desched to complete. 
*/ do { pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE); - } while (pend_state & BIT_ULL(58)); + } while (pend_state & (BIT_ULL(58) | BIT_ULL(56))); switch (dev->gw_mode) { case CN10K_GW_MODE_PREF: @@ -237,6 +244,7 @@ cn10k_sso_hws_reset(void *arg, void *hws) } plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL); + roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1); rte_mb(); } @@ -475,6 +483,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev) CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue, sso_hws_tx_adptr_enq); event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue; + event_dev->profile_switch = cn10k_sso_hws_profile_switch; #else RTE_SET_USED(event_dev); #endif @@ -582,11 +591,16 @@ cn10k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port, cn10k_sso_hws_get_work_empty(ws, &ev, (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F | NIX_RX_MULTI_SEG_F); - if (is_pend && ev.u64) { + if (is_pend && ev.u64) if (flush_cb) flush_cb(event_dev->data->dev_id, ev, args); + ptag = (plt_read64(ws->base + SSOW_LF_GWS_TAG) >> 32) & SSO_TT_EMPTY; + if (ptag != SSO_TT_EMPTY) cnxk_sso_hws_swtag_flush(ws->base); - } + + do { + ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE); + } while (ptag & BIT_ULL(56)); /* Check if we have work in PRF_WQE0, if so extract it. */ switch (dev->gw_mode) { @@ -610,17 +624,19 @@ cn10k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port, if (ev.u64) { if (flush_cb) flush_cb(event_dev->data->dev_id, ev, args); - cnxk_sso_hws_swtag_flush(ws->base); } + cnxk_sso_hws_swtag_flush(ws->base); + do { + ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE); + } while (ptag & BIT_ULL(56)); } ws->swtag_req = 0; plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL); } static int -cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port, - const uint8_t queues[], const uint8_t priorities[], - uint16_t nb_links) +cn10k_sso_port_link_profile(struct rte_eventdev *event_dev, void *port, const uint8_t queues[], + const uint8_t priorities[], uint16_t nb_links, uint8_t profile) { struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); uint16_t hwgrp_ids[nb_links]; @@ -629,14 +645,14 @@ cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port, RTE_SET_USED(priorities); for (link = 0; link < nb_links; link++) hwgrp_ids[link] = queues[link]; - nb_links = cn10k_sso_hws_link(dev, port, hwgrp_ids, nb_links); + nb_links = cn10k_sso_hws_link(dev, port, hwgrp_ids, nb_links, profile); return (int)nb_links; } static int -cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void *port, - uint8_t queues[], uint16_t nb_unlinks) +cn10k_sso_port_unlink_profile(struct rte_eventdev *event_dev, void *port, uint8_t queues[], + uint16_t nb_unlinks, uint8_t profile) { struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); uint16_t hwgrp_ids[nb_unlinks]; @@ -644,11 +660,25 @@ cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void *port, for (unlink = 0; unlink < nb_unlinks; unlink++) hwgrp_ids[unlink] = queues[unlink]; - nb_unlinks = cn10k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks); + nb_unlinks = cn10k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks, profile); return (int)nb_unlinks; } +static int +cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port, const uint8_t queues[], + const uint8_t priorities[], uint16_t nb_links) +{ + return cn10k_sso_port_link_profile(event_dev, port, queues, priorities, nb_links, 0); +} + +static int +cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void *port, uint8_t queues[], + uint16_t nb_unlinks) +{ + return 
cn10k_sso_port_unlink_profile(event_dev, port, queues, nb_unlinks, 0); +} + static void cn10k_sso_configure_queue_stash(struct rte_eventdev *event_dev) { @@ -670,7 +700,9 @@ cn10k_sso_configure_queue_stash(struct rte_eventdev *event_dev) static int cn10k_sso_start(struct rte_eventdev *event_dev) { - int rc; + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + uint8_t hws[RTE_EVENT_MAX_PORTS_PER_DEV]; + int rc, i; rc = cn10k_sso_updt_tx_adptr_data(event_dev); if (rc < 0) @@ -682,6 +714,9 @@ cn10k_sso_start(struct rte_eventdev *event_dev) if (rc < 0) return rc; cn10k_sso_fp_fns_set(event_dev); + for (i = 0; i < event_dev->data->nb_ports; i++) + hws[i] = i; + roc_sso_hws_gwc_invalidate(&dev->sso, hws, event_dev->data->nb_ports); return rc; } @@ -689,6 +724,13 @@ cn10k_sso_start(struct rte_eventdev *event_dev) static void cn10k_sso_stop(struct rte_eventdev *event_dev) { + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + uint8_t hws[RTE_EVENT_MAX_PORTS_PER_DEV]; + int i; + + for (i = 0; i < event_dev->data->nb_ports; i++) + hws[i] = i; + roc_sso_hws_gwc_invalidate(&dev->sso, hws, event_dev->data->nb_ports); cnxk_sso_stop(event_dev, cn10k_sso_hws_reset, cn10k_sso_hws_flush_events); } @@ -911,8 +953,8 @@ static int cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev, uint32_t *caps) { - CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k"); - CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); + CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", ENOTSUP); + CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k", ENOTSUP); *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD | RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA | @@ -929,8 +971,8 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, { int ret; - CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k"); - CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); + CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", EINVAL); + CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k", EINVAL); cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); @@ -944,8 +986,8 @@ static int cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev, int32_t queue_pair_id) { - CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k"); - CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); + CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", EINVAL); + CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k", EINVAL); return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id); } @@ -963,8 +1005,8 @@ cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev, struct rte_event_crypto_adapter_vector_limits *limits) { - CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k"); - CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); + CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", EINVAL); + CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k", EINVAL); limits->log2_sz = false; limits->min_sz = 0; @@ -993,6 +1035,8 @@ static struct eventdev_ops cn10k_sso_dev_ops = { .port_quiesce = cn10k_sso_port_quiesce, .port_link = cn10k_sso_port_link, .port_unlink = cn10k_sso_port_unlink, + .port_link_profile = cn10k_sso_port_link_profile, + .port_unlink_profile = cn10k_sso_port_unlink_profile, .timeout_ticks = cnxk_sso_timeout_ticks, .eth_rx_adapter_caps_get = cn10k_sso_rx_adapter_caps_get, diff --git a/drivers/event/cnxk/cn10k_eventdev.h 
b/drivers/event/cnxk/cn10k_eventdev.h index 29567728cde..e79b68e0ac9 100644 --- a/drivers/event/cnxk/cn10k_eventdev.h +++ b/drivers/event/cnxk/cn10k_eventdev.h @@ -10,9 +10,9 @@ struct cn10k_sso_hws { uint64_t base; - uint64_t gw_rdata; - void *lookup_mem; uint32_t gw_wdata; + void *lookup_mem; + uint64_t gw_rdata; uint8_t swtag_req; uint8_t hws_id; /* PTP timestamp */ diff --git a/drivers/event/cnxk/cn10k_worker.c b/drivers/event/cnxk/cn10k_worker.c index 9b5bf901594..d59769717e4 100644 --- a/drivers/event/cnxk/cn10k_worker.c +++ b/drivers/event/cnxk/cn10k_worker.c @@ -431,3 +431,14 @@ cn10k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[], return 1; } + +int __rte_hot +cn10k_sso_hws_profile_switch(void *port, uint8_t profile) +{ + struct cn10k_sso_hws *ws = port; + + ws->gw_wdata &= ~(0xFFUL); + ws->gw_wdata |= (profile + 1); + + return 0; +} diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h index b4ee023723e..8aa916fa129 100644 --- a/drivers/event/cnxk/cn10k_worker.h +++ b/drivers/event/cnxk/cn10k_worker.h @@ -59,9 +59,9 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc uint16_t lmt_id, d_off; struct rte_mbuf **wqe; struct rte_mbuf *mbuf; + uint64_t sa_base = 0; uintptr_t cpth = 0; uint8_t loff = 0; - uint64_t sa_base; int i; mbuf_init |= ((uint64_t)port_id) << 48; @@ -125,6 +125,11 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc cpth = ((uintptr_t)mbuf + (uint16_t)d_off); + /* Update mempool pointer for full mode pkt */ + if ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11)) && + !((*(uint64_t *)cpth) & BIT(15))) + mbuf->pool = mp; + mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, cq_w5, sa_base, laddr, &loff, mbuf, d_off, flags, mbuf_init); @@ -199,6 +204,11 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64, mp = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port, lookup_mem); meta_aura = mp ? 
mp->pool_id : m->pool->pool_id; + /* Update mempool pointer for full mode pkt */ + if (mp && (flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11)) && + !((*(uint64_t *)cpth) & BIT(15))) + ((struct rte_mbuf *)mbuf)->pool = mp; + mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc( cq_w1, cq_w5, sa_base, (uintptr_t)&iova, &loff, (struct rte_mbuf *)mbuf, d_off, flags, @@ -239,19 +249,32 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev, } gw; gw.get_work = ws->gw_wdata; -#if defined(RTE_ARCH_ARM64) && !defined(__clang__) +#if defined(RTE_ARCH_ARM64) +#if !defined(__clang__) asm volatile( PLT_CPU_FEATURE_PREAMBLE "caspal %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n" : [wdata] "+r"(gw.get_work) : [gw_loc] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0) : "memory"); +#else + register uint64_t x0 __asm("x0") = (uint64_t)gw.u64[0]; + register uint64_t x1 __asm("x1") = (uint64_t)gw.u64[1]; + asm volatile(".arch armv8-a+lse\n" + "caspal %[x0], %[x1], %[x0], %[x1], [%[dst]]\n" + : [x0] "+r"(x0), [x1] "+r"(x1) + : [dst] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0) + : "memory"); + gw.u64[0] = x0; + gw.u64[1] = x1; +#endif #else plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0); do { roc_load_pair(gw.u64[0], gw.u64[1], ws->base + SSOW_LF_GWS_WQE0); } while (gw.u64[0] & BIT_ULL(63)); + rte_atomic_thread_fence(__ATOMIC_SEQ_CST); #endif ws->gw_rdata = gw.u64[0]; if (gw.u64[1]) @@ -316,6 +339,7 @@ uint16_t __rte_hot cn10k_sso_hws_enq_new_burst(void *port, uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events); +int __rte_hot cn10k_sso_hws_profile_switch(void *port, uint8_t profile); #define R(name, flags) \ uint16_t __rte_hot cn10k_sso_hws_deq_##name( \ diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c index 6cce5477f0b..9fb9ca0d63e 100644 --- a/drivers/event/cnxk/cn9k_eventdev.c +++ b/drivers/event/cnxk/cn9k_eventdev.c @@ -15,7 +15,7 @@ enq_op = enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)] static int -cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link) +cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link, uint8_t profile) { struct cnxk_sso_evdev *dev = arg; struct cn9k_sso_hws_dual *dws; @@ -24,22 +24,20 @@ cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link) if (dev->dual_ws) { dws = port; - rc = roc_sso_hws_link(&dev->sso, - CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map, - nb_link); - rc |= roc_sso_hws_link(&dev->sso, - CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1), - map, nb_link); + rc = roc_sso_hws_link(&dev->sso, CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map, nb_link, + profile); + rc |= roc_sso_hws_link(&dev->sso, CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1), map, + nb_link, profile); } else { ws = port; - rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link); + rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link, profile); } return rc; } static int -cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link) +cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link, uint8_t profile) { struct cnxk_sso_evdev *dev = arg; struct cn9k_sso_hws_dual *dws; @@ -48,15 +46,13 @@ cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link) if (dev->dual_ws) { dws = port; - rc = roc_sso_hws_unlink(&dev->sso, - CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), - map, nb_link); - rc |= roc_sso_hws_unlink(&dev->sso, - CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1), - map, nb_link); + rc = roc_sso_hws_unlink(&dev->sso, 
CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map, + nb_link, profile); + rc |= roc_sso_hws_unlink(&dev->sso, CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1), map, + nb_link, profile); } else { ws = port; - rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link); + rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link, profile); } return rc; @@ -97,21 +93,24 @@ cn9k_sso_hws_release(void *arg, void *hws) struct cnxk_sso_evdev *dev = arg; struct cn9k_sso_hws_dual *dws; struct cn9k_sso_hws *ws; - uint16_t i; + uint16_t i, k; if (dev->dual_ws) { dws = hws; for (i = 0; i < dev->nb_event_queues; i++) { - roc_sso_hws_unlink(&dev->sso, - CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), &i, 1); - roc_sso_hws_unlink(&dev->sso, - CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1), &i, 1); + for (k = 0; k < CNXK_SSO_MAX_PROFILES; k++) { + roc_sso_hws_unlink(&dev->sso, CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), + &i, 1, k); + roc_sso_hws_unlink(&dev->sso, CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1), + &i, 1, k); + } } memset(dws, 0, sizeof(*dws)); } else { ws = hws; for (i = 0; i < dev->nb_event_queues; i++) - roc_sso_hws_unlink(&dev->sso, ws->hws_id, &i, 1); + for (k = 0; k < CNXK_SSO_MAX_PROFILES; k++) + roc_sso_hws_unlink(&dev->sso, ws->hws_id, &i, 1, k); memset(ws, 0, sizeof(*ws)); } } @@ -222,16 +221,16 @@ cn9k_sso_hws_reset(void *arg, void *hws) cnxk_sso_hws_swtag_untag( base + SSOW_LF_GWS_OP_SWTAG_UNTAG); plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED); + } else if (pend_tt != SSO_TT_EMPTY) { + plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH); } /* Wait for desched to complete. */ do { pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE); - } while (pend_state & BIT_ULL(58)); - + } while (pend_state & (BIT_ULL(58) | BIT_ULL(56))); plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL); } - if (dev->dual_ws) dws->swtag_req = 0; else @@ -438,6 +437,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev) event_dev->enqueue_burst = cn9k_sso_hws_enq_burst; event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst; event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst; + event_dev->profile_switch = cn9k_sso_hws_profile_switch; if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) { CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg); CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, @@ -475,6 +475,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev) event_dev->enqueue_forward_burst = cn9k_sso_hws_dual_enq_fwd_burst; event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq; + event_dev->profile_switch = cn9k_sso_hws_dual_profile_switch; if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) { CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, @@ -686,18 +687,30 @@ cn9k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port, base, &ev, dev->rx_offloads, dev->dual_ws ? dws->lookup_mem : ws->lookup_mem, dev->dual_ws ? 
dws->tstamp : ws->tstamp); - if (is_pend && ev.u64) { + if (is_pend && ev.u64) if (flush_cb) flush_cb(event_dev->data->dev_id, ev, args); - cnxk_sso_hws_swtag_flush(ws->base); - } + + ptag = (plt_read64(base + SSOW_LF_GWS_TAG) >> 32) & SSO_TT_EMPTY; + if (ptag != SSO_TT_EMPTY) + cnxk_sso_hws_swtag_flush(base); + + do { + ptag = plt_read64(base + SSOW_LF_GWS_PENDSTATE); + } while (ptag & BIT_ULL(56)); + + plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL); } + + if (dev->dual_ws) + dws->swtag_req = 0; + else + ws->swtag_req = 0; } static int -cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port, - const uint8_t queues[], const uint8_t priorities[], - uint16_t nb_links) +cn9k_sso_port_link_profile(struct rte_eventdev *event_dev, void *port, const uint8_t queues[], + const uint8_t priorities[], uint16_t nb_links, uint8_t profile) { struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); uint16_t hwgrp_ids[nb_links]; @@ -706,14 +719,14 @@ cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port, RTE_SET_USED(priorities); for (link = 0; link < nb_links; link++) hwgrp_ids[link] = queues[link]; - nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links); + nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links, profile); return (int)nb_links; } static int -cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port, - uint8_t queues[], uint16_t nb_unlinks) +cn9k_sso_port_unlink_profile(struct rte_eventdev *event_dev, void *port, uint8_t queues[], + uint16_t nb_unlinks, uint8_t profile) { struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); uint16_t hwgrp_ids[nb_unlinks]; @@ -721,11 +734,25 @@ cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port, for (unlink = 0; unlink < nb_unlinks; unlink++) hwgrp_ids[unlink] = queues[unlink]; - nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks); + nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks, profile); return (int)nb_unlinks; } +static int +cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port, const uint8_t queues[], + const uint8_t priorities[], uint16_t nb_links) +{ + return cn9k_sso_port_link_profile(event_dev, port, queues, priorities, nb_links, 0); +} + +static int +cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port, uint8_t queues[], + uint16_t nb_unlinks) +{ + return cn9k_sso_port_unlink_profile(event_dev, port, queues, nb_unlinks, 0); +} + static int cn9k_sso_start(struct rte_eventdev *event_dev) { @@ -942,11 +969,11 @@ cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev, } static int -cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev, - const struct rte_cryptodev *cdev, uint32_t *caps) +cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev, + uint32_t *caps) { - CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k"); - CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); + CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k", ENOTSUP); + CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k", ENOTSUP); *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD | RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA; @@ -962,8 +989,8 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, { int ret; - CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k"); - CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); + CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k", EINVAL); + CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k", EINVAL); 
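/*
 * Rough usage sketch for the per-profile link/unlink ops wired up above for
 * cn9k/cn10k: an application pre-links two queue sets to one port under
 * profile 0 and profile 1, then flips the active profile at runtime instead
 * of relinking.  The application-facing wrapper names used here
 * (rte_event_port_profile_links_set / rte_event_port_profile_switch) are
 * assumptions about the eventdev profile API this series targets; only the
 * PMD-level ops are defined in this patch.
 */
#include <rte_common.h>
#include <rte_eventdev.h>

static int
setup_two_profiles(uint8_t dev_id, uint8_t port_id)
{
	const uint8_t rx_queues[] = { 0, 1 };	/* served while profile 0 is active */
	const uint8_t tx_queues[] = { 2 };	/* served while profile 1 is active */
	int rc;

	/* NULL priorities: every link gets the default (normal) priority. */
	rc = rte_event_port_profile_links_set(dev_id, port_id, rx_queues,
					      NULL, RTE_DIM(rx_queues), 0);
	if (rc != (int)RTE_DIM(rx_queues))
		return -1;

	rc = rte_event_port_profile_links_set(dev_id, port_id, tx_queues,
					      NULL, RTE_DIM(tx_queues), 1);
	if (rc != (int)RTE_DIM(tx_queues))
		return -1;

	/* A worker switches the active profile without touching the links. */
	return rte_event_port_profile_switch(dev_id, port_id, 1);
}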
cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); @@ -977,8 +1004,8 @@ static int cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev, int32_t queue_pair_id) { - CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k"); - CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); + CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k", EINVAL); + CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k", EINVAL); return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id); } @@ -1006,6 +1033,8 @@ static struct eventdev_ops cn9k_sso_dev_ops = { .port_quiesce = cn9k_sso_port_quiesce, .port_link = cn9k_sso_port_link, .port_unlink = cn9k_sso_port_unlink, + .port_link_profile = cn9k_sso_port_link_profile, + .port_unlink_profile = cn9k_sso_port_unlink_profile, .timeout_ticks = cnxk_sso_timeout_ticks, .eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get, diff --git a/drivers/event/cnxk/cn9k_worker.c b/drivers/event/cnxk/cn9k_worker.c index abbbfffd853..a9ac49a5a7b 100644 --- a/drivers/event/cnxk/cn9k_worker.c +++ b/drivers/event/cnxk/cn9k_worker.c @@ -66,6 +66,17 @@ cn9k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[], return 1; } +int __rte_hot +cn9k_sso_hws_profile_switch(void *port, uint8_t profile) +{ + struct cn9k_sso_hws *ws = port; + + ws->gw_wdata &= ~(0xFFUL); + ws->gw_wdata |= (profile + 1); + + return 0; +} + /* Dual ws ops. */ uint16_t __rte_hot @@ -149,3 +160,14 @@ cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events) return cn9k_cpt_crypto_adapter_enqueue(dws->base[!dws->vws], ev->event_ptr); } + +int __rte_hot +cn9k_sso_hws_dual_profile_switch(void *port, uint8_t profile) +{ + struct cn9k_sso_hws_dual *dws = port; + + dws->gw_wdata &= ~(0xFFUL); + dws->gw_wdata |= (profile + 1); + + return 0; +} diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h index 9ddab095ace..6936b7ad049 100644 --- a/drivers/event/cnxk/cn9k_worker.h +++ b/drivers/event/cnxk/cn9k_worker.h @@ -156,15 +156,6 @@ cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws, uint64_t base, } } -static __rte_always_inline void -cn9k_sso_tx_tag_flush(uint64_t base) -{ - if (unlikely(CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_TAG)) == - SSO_TT_EMPTY)) - return; - plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH); -} - static __rte_always_inline void cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id, const uint32_t tag, const uint32_t flags, @@ -375,6 +366,7 @@ uint16_t __rte_hot cn9k_sso_hws_enq_new_burst(void *port, uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events); +int __rte_hot cn9k_sso_hws_profile_switch(void *port, uint8_t profile); uint16_t __rte_hot cn9k_sso_hws_dual_enq(void *port, const struct rte_event *ev); @@ -391,6 +383,7 @@ uint16_t __rte_hot cn9k_sso_hws_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events); uint16_t __rte_hot cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events); +int __rte_hot cn9k_sso_hws_dual_profile_switch(void *port, uint8_t profile); #define R(name, flags) \ uint16_t __rte_hot cn9k_sso_hws_deq_##name( \ @@ -727,7 +720,6 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd, uint64_t *txq_data, const uint32_t flags) { struct rte_mbuf *m = ev->mbuf; - uint16_t ref_cnt = m->refcnt; struct cn9k_eth_txq *txq; /* Perform header writes before barrier for TSO */ @@ -800,13 +792,6 @@ cn9k_sso_hws_event_tx(uint64_t base, struct 
rte_event *ev, uint64_t *cmd, } done: - if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { - if (ref_cnt > 1) - return 1; - } - - cn9k_sso_tx_tag_flush(base); - return 1; } diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c index 27883a3619d..0c61f4c20ee 100644 --- a/drivers/event/cnxk/cnxk_eventdev.c +++ b/drivers/event/cnxk/cnxk_eventdev.c @@ -30,7 +30,9 @@ cnxk_sso_info_get(struct cnxk_sso_evdev *dev, RTE_EVENT_DEV_CAP_NONSEQ_MODE | RTE_EVENT_DEV_CAP_CARRY_FLOW_ID | RTE_EVENT_DEV_CAP_MAINTENANCE_FREE | - RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR; + RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR | + RTE_EVENT_DEV_CAP_PROFILE_LINK; + dev_info->max_profiles_per_port = CNXK_SSO_MAX_PROFILES; } int @@ -128,23 +130,25 @@ cnxk_sso_restore_links(const struct rte_eventdev *event_dev, { struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); uint16_t *links_map, hwgrp[CNXK_SSO_MAX_HWGRP]; - int i, j; + int i, j, k; for (i = 0; i < dev->nb_event_ports; i++) { - uint16_t nb_hwgrp = 0; - - links_map = event_dev->data->links_map; - /* Point links_map to this port specific area */ - links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV); + for (k = 0; k < CNXK_SSO_MAX_PROFILES; k++) { + uint16_t nb_hwgrp = 0; + + links_map = event_dev->data->links_map[k]; + /* Point links_map to this port specific area */ + links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV); + + for (j = 0; j < dev->nb_event_queues; j++) { + if (links_map[j] == 0xdead) + continue; + hwgrp[nb_hwgrp] = j; + nb_hwgrp++; + } - for (j = 0; j < dev->nb_event_queues; j++) { - if (links_map[j] == 0xdead) - continue; - hwgrp[nb_hwgrp] = j; - nb_hwgrp++; + link_fn(dev, event_dev->data->ports[i], hwgrp, nb_hwgrp, k); } - - link_fn(dev, event_dev->data->ports[i], hwgrp, nb_hwgrp); } } @@ -435,7 +439,7 @@ cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn) { struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); uint16_t all_queues[CNXK_SSO_MAX_HWGRP]; - uint16_t i; + uint16_t i, j; void *ws; if (!dev->configured) @@ -446,7 +450,8 @@ cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn) for (i = 0; i < dev->nb_event_ports; i++) { ws = event_dev->data->ports[i]; - unlink_fn(dev, ws, all_queues, dev->nb_event_queues); + for (j = 0; j < CNXK_SSO_MAX_PROFILES; j++) + unlink_fn(dev, ws, all_queues, dev->nb_event_queues, j); rte_free(cnxk_sso_hws_get_cookie(ws)); event_dev->data->ports[i] = NULL; } @@ -612,7 +617,7 @@ cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs) &dev->force_ena_bp); rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag, &single_ws); - rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_flag, + rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_value, &dev->gw_mode); rte_kvargs_process(kvlist, CN10K_SSO_STASH, &parse_sso_kvargs_stash_dict, dev); diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h index 962e6302566..d42d1afa1a1 100644 --- a/drivers/event/cnxk/cnxk_eventdev.h +++ b/drivers/event/cnxk/cnxk_eventdev.h @@ -33,6 +33,8 @@ #define CN10K_SSO_GW_MODE "gw_mode" #define CN10K_SSO_STASH "stash" +#define CNXK_SSO_MAX_PROFILES 2 + #define NSEC2USEC(__ns) ((__ns) / 1E3) #define USEC2NSEC(__us) ((__us)*1E3) #define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9) @@ -48,19 +50,19 @@ (min + val / ((max + cnt - 1) / cnt)) #define CNXK_SSO_FLUSH_RETRY_MAX 0xfff -#define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name) \ +#define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name, err_val) \ do { \ if 
(strncmp(dev->driver->name, drv_name, strlen(drv_name))) \ - return -EINVAL; \ + return -err_val; \ } while (0) typedef void *(*cnxk_sso_init_hws_mem_t)(void *dev, uint8_t port_id); typedef void (*cnxk_sso_hws_setup_t)(void *dev, void *ws, uintptr_t grp_base); typedef void (*cnxk_sso_hws_release_t)(void *dev, void *ws); -typedef int (*cnxk_sso_link_t)(void *dev, void *ws, uint16_t *map, - uint16_t nb_link); -typedef int (*cnxk_sso_unlink_t)(void *dev, void *ws, uint16_t *map, - uint16_t nb_link); +typedef int (*cnxk_sso_link_t)(void *dev, void *ws, uint16_t *map, uint16_t nb_link, + uint8_t profile); +typedef int (*cnxk_sso_unlink_t)(void *dev, void *ws, uint16_t *map, uint16_t nb_link, + uint8_t profile); typedef void (*cnxk_handle_event_t)(void *arg, struct rte_event ev); typedef void (*cnxk_sso_hws_reset_t)(void *arg, void *ws); typedef int (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base, @@ -119,7 +121,7 @@ struct cnxk_sso_evdev { /* CN9K */ uint8_t dual_ws; /* CN10K */ - uint8_t gw_mode; + uint32_t gw_mode; uint16_t stash_cnt; struct cnxk_sso_stash *stash_parse_data; } __rte_cache_aligned; diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c index 121480df15f..6d59fdf9098 100644 --- a/drivers/event/cnxk/cnxk_tim_evdev.c +++ b/drivers/event/cnxk/cnxk_tim_evdev.c @@ -392,6 +392,7 @@ cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags, cnxk_tim_ops.start = cnxk_tim_ring_start; cnxk_tim_ops.stop = cnxk_tim_ring_stop; cnxk_tim_ops.get_info = cnxk_tim_ring_info_get; + cnxk_tim_ops.remaining_ticks_get = cnxk_tim_remaining_ticks_get; sso_set_priv_mem_fn = priv_mem_fn; if (dev->enable_stats) { diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h index 3a0b036cb40..b91fcb3acab 100644 --- a/drivers/event/cnxk/cnxk_tim_evdev.h +++ b/drivers/event/cnxk/cnxk_tim_evdev.h @@ -320,6 +320,9 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr, struct rte_event_timer **tim, const uint16_t nb_timers); +int cnxk_tim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter, + const struct rte_event_timer *evtim, uint64_t *ticks_remaining); + int cnxk_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags, uint32_t *caps, const struct event_timer_adapter_ops **ops, diff --git a/drivers/event/cnxk/cnxk_tim_worker.c b/drivers/event/cnxk/cnxk_tim_worker.c index 923a72093b8..944490da9e3 100644 --- a/drivers/event/cnxk/cnxk_tim_worker.c +++ b/drivers/event/cnxk/cnxk_tim_worker.c @@ -171,3 +171,38 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr, return index; } + +int +cnxk_tim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter, + const struct rte_event_timer *evtim, uint64_t *ticks_remaining) +{ + struct cnxk_tim_ring *tim_ring = adapter->data->adapter_priv; + struct cnxk_tim_bkt *bkt, *current_bkt; + struct cnxk_tim_ent *entry; + uint64_t bkt_cyc, bucket; + uint64_t sema; + + if (evtim->impl_opaque[1] == 0 || evtim->impl_opaque[0] == 0) + return -ENOENT; + + entry = (struct cnxk_tim_ent *)(uintptr_t)evtim->impl_opaque[0]; + if (entry->wqe != evtim->ev.u64) + return -ENOENT; + + if (evtim->state != RTE_EVENT_TIMER_ARMED) + return -ENOENT; + + bkt = (struct cnxk_tim_bkt *)evtim->impl_opaque[1]; + sema = __atomic_load_n(&bkt->w1, rte_memory_order_acquire); + if (cnxk_tim_bkt_get_hbt(sema) || !cnxk_tim_bkt_get_nent(sema)) + return -ENOENT; + + bkt_cyc = tim_ring->tick_fn(tim_ring->tbase) - tim_ring->ring_start_cyc; + bucket = 
rte_reciprocal_divide_u64(bkt_cyc, &tim_ring->fast_div); + current_bkt = &tim_ring->bkt[bucket]; + + *ticks_remaining = RTE_MAX(bkt, current_bkt) - RTE_MIN(bkt, current_bkt); + /* Assume that the current bucket is yet to expire */ + *ticks_remaining += 1; + return 0; +} diff --git a/drivers/event/cnxk/cnxk_worker.c b/drivers/event/cnxk/cnxk_worker.c new file mode 100644 index 00000000000..60876abcffa --- /dev/null +++ b/drivers/event/cnxk/cnxk_worker.c @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2023 Marvell. + */ + +#include +#include +#include + +#include "roc_platform.h" +#include "roc_sso.h" +#include "roc_sso_dp.h" + +struct pwords { + uint64_t u[5]; +}; + +void +rte_pmd_cnxk_eventdev_wait_head(uint8_t dev, uint8_t port) +{ + struct pwords *w = rte_event_fp_ops[dev].data[port]; + uint8_t vws; + + if (w->u[1] & 0x3) { + roc_sso_hws_head_wait(w->u[0]); + } else { + /* Dual WS case */ + vws = (w->u[4] >> 8) & 0x1; + roc_sso_hws_head_wait(w->u[vws]); + } +} + + +uint8_t +rte_pmd_cnxk_eventdev_is_head(uint8_t dev, uint8_t port) +{ + struct pwords *w = rte_event_fp_ops[dev].data[port]; + uintptr_t base; + uint8_t vws; + + if (w->u[1] & 0x3) { + base = w->u[0]; + } else { + /* Dual WS case */ + vws = (w->u[4] >> 8) & 0x1; + base = w->u[vws]; + } + + return roc_sso_hws_is_head(base); +} diff --git a/drivers/event/cnxk/cnxk_worker.h b/drivers/event/cnxk/cnxk_worker.h index 2bd41f8a5ed..0e0d728ba4c 100644 --- a/drivers/event/cnxk/cnxk_worker.h +++ b/drivers/event/cnxk/cnxk_worker.h @@ -54,12 +54,6 @@ cnxk_sso_hws_swtag_untag(uintptr_t swtag_untag_op) static __rte_always_inline void cnxk_sso_hws_swtag_flush(uint64_t base) { - /* Ensure that there is no previous flush is pending. */ - while (plt_read64(base + SSOW_LF_GWS_PENDSTATE) & BIT_ULL(56)) - ; - if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_TAG)) == - SSO_TT_EMPTY) - return; plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH); } diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build index 51f1be88487..13281d687f7 100644 --- a/drivers/event/cnxk/meson.build +++ b/drivers/event/cnxk/meson.build @@ -20,6 +20,7 @@ endif sources = files( 'cnxk_eventdev.c', + 'cnxk_worker.c', 'cnxk_eventdev_adptr.c', 'cnxk_eventdev_selftest.c', 'cnxk_eventdev_stats.c', @@ -314,6 +315,7 @@ foreach flag: extra_flags endif endforeach +headers = files('rte_pmd_cnxk_eventdev.h') deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk'] require_iova_in_mbuf = false diff --git a/drivers/event/cnxk/rte_pmd_cnxk_eventdev.h b/drivers/event/cnxk/rte_pmd_cnxk_eventdev.h new file mode 100644 index 00000000000..b7b56f5fa1b --- /dev/null +++ b/drivers/event/cnxk/rte_pmd_cnxk_eventdev.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2023 Marvell Inc. + */ + +/** + * @file rte_pmd_cnxk_eventdev.h + * Marvell CNXK eventdev PMD specific functions. + * + **/ + +#ifndef _PMD_CNXK_EVENTDEV_H_ +#define _PMD_CNXK_EVENTDEV_H_ + +#include +#include + +/** + * Wait for the currently active flow context on the event port to become HEAD + * of the flow-chain. + * + * @param dev + * Event device identifier. + * + * @param port + * Event port identifier. + */ +__rte_experimental +void +rte_pmd_cnxk_eventdev_wait_head(uint8_t dev, uint8_t port); + + +/** + * Check if the currently active flow context on the event port is the HEAD + * of the flow-chain. + * + * @param dev + * Event device identifier. + * + * @param port + * Event port identifier. 
+ * + * @return Status of the currently held flow context + * 0 not the head of the flow-chain + * 1 head of the flow-chain + */ +__rte_experimental +uint8_t +rte_pmd_cnxk_eventdev_is_head(uint8_t dev, uint8_t port); + +#endif diff --git a/drivers/event/cnxk/version.map b/drivers/event/cnxk/version.map new file mode 100644 index 00000000000..3dd9a8fdd11 --- /dev/null +++ b/drivers/event/cnxk/version.map @@ -0,0 +1,11 @@ +DPDK_24 { + local: *; +}; + +EXPERIMENTAL { + global: + + # added in 23.11 + rte_pmd_cnxk_eventdev_is_head; + rte_pmd_cnxk_eventdev_wait_head; +}; diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c index 60c5cd48040..e645f7595ab 100644 --- a/drivers/event/dlb2/dlb2.c +++ b/drivers/event/dlb2/dlb2.c @@ -79,6 +79,7 @@ static struct rte_event_dev_info evdev_dlb2_default_info = { RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK | RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT | RTE_EVENT_DEV_CAP_MAINTENANCE_FREE), + .max_profiles_per_port = 1, }; struct process_local_port_data @@ -117,63 +118,6 @@ dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2, } } -/* override defaults with value(s) provided on command line */ -static void -dlb2_init_cq_weight(struct dlb2_eventdev *dlb2, int *cq_weight) -{ - int q; - - for (q = 0; q < DLB2_MAX_NUM_PORTS_ALL; q++) - dlb2->ev_ports[q].cq_weight = cq_weight[q]; -} - -static int -set_cq_weight(const char *key __rte_unused, - const char *value, - void *opaque) -{ - struct dlb2_cq_weight *cq_weight = opaque; - int first, last, weight, i; - - if (value == NULL || opaque == NULL) { - DLB2_LOG_ERR("NULL pointer\n"); - return -EINVAL; - } - - /* command line override may take one of the following 3 forms: - * qid_depth_thresh=all: ... all queues - * qid_depth_thresh=qidA-qidB: ... a range of queues - * qid_depth_thresh=qid: ... just one queue - */ - if (sscanf(value, "all:%d", &weight) == 1) { - first = 0; - last = DLB2_MAX_NUM_PORTS_ALL - 1; - } else if (sscanf(value, "%d-%d:%d", &first, &last, &weight) == 3) { - /* we have everything we need */ - } else if (sscanf(value, "%d:%d", &first, &weight) == 2) { - last = first; - } else { - DLB2_LOG_ERR("Error parsing ldb port qe weight devarg. 
Should be all:val, qid-qid:val, or qid:val\n"); - return -EINVAL; - } - - if (first > last || first < 0 || - last >= DLB2_MAX_NUM_PORTS_ALL) { - DLB2_LOG_ERR("Error parsing ldb port qe weight arg, invalid port value\n"); - return -EINVAL; - } - - if (weight < 0 || weight > DLB2_MAX_CQ_DEPTH_OVERRIDE) { - DLB2_LOG_ERR("Error parsing ldb port qe weight devarg, must be < cq depth\n"); - return -EINVAL; - } - - for (i = first; i <= last; i++) - cq_weight->limit[i] = weight; /* indexed by qid */ - - return 0; -} - /* override defaults with value(s) provided on command line */ static void dlb2_init_port_cos(struct dlb2_eventdev *dlb2, int *port_cos) @@ -397,7 +341,6 @@ set_max_enq_depth(const char *key __rte_unused, return 0; } - static int set_max_num_events(const char *key __rte_unused, const char *value, @@ -667,6 +610,26 @@ set_default_ldb_port_allocation(const char *key __rte_unused, return 0; } +static int +set_enable_cq_weight(const char *key __rte_unused, + const char *value, + void *opaque) +{ + bool *enable_cq_weight = opaque; + + if (value == NULL || opaque == NULL) { + DLB2_LOG_ERR("NULL pointer\n"); + return -EINVAL; + } + + if ((*value == 'y') || (*value == 'Y')) + *enable_cq_weight = true; + else + *enable_cq_weight = false; + + return 0; +} + static int set_qid_depth_thresh(const char *key __rte_unused, const char *value, @@ -1644,26 +1607,20 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, qm_port->id = qm_port_id; - if (dlb2->version == DLB2_HW_V2) { - qm_port->cached_ldb_credits = 0; - qm_port->cached_dir_credits = 0; - if (ev_port->cq_weight) { - struct dlb2_enable_cq_weight_args - cq_weight_args = { {0} }; - - cq_weight_args.port_id = qm_port->id; - cq_weight_args.limit = ev_port->cq_weight; - ret = dlb2_iface_enable_cq_weight(handle, &cq_weight_args); - if (ret < 0) { - DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n", + if (dlb2->version == DLB2_HW_V2_5 && (dlb2->enable_cq_weight == true)) { + struct dlb2_enable_cq_weight_args cq_weight_args = { {0} }; + cq_weight_args.port_id = qm_port->id; + cq_weight_args.limit = dequeue_depth; + ret = dlb2_iface_enable_cq_weight(handle, &cq_weight_args); + + if (ret < 0) { + DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n", ret, dlb2_error_strings[cfg.response. 
status]); - goto error_exit; - } + goto error_exit; } - qm_port->cq_weight = ev_port->cq_weight; + qm_port->cq_weight = dequeue_depth; } else { - qm_port->cached_credits = 0; qm_port->cq_weight = 0; } @@ -1679,7 +1636,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, else qm_port->cq_depth_mask = qm_port->cq_depth - 1; - qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask); + qm_port->gen_bit_shift = rte_popcount32(qm_port->cq_depth_mask); /* starting value of gen bit - it toggles at wrap time */ qm_port->gen_bit = 1; @@ -1893,7 +1850,7 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2, else qm_port->cq_depth_mask = cfg.cq_depth - 1; - qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask); + qm_port->gen_bit_shift = rte_popcount32(qm_port->cq_depth_mask); /* starting value of gen bit - it toggles at wrap time */ qm_port->gen_bit = 1; dlb2_hw_cq_bitmask_init(qm_port, qm_port->cq_depth); @@ -3695,7 +3652,7 @@ dlb2_recv_qe_sparse(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe) /* Mask off gen bits we don't care about */ gen_bits &= and_mask; - return __builtin_popcount(gen_bits); + return rte_popcount32(gen_bits); } static inline void @@ -3946,7 +3903,7 @@ dlb2_recv_qe_sparse_vec(struct dlb2_port *qm_port, void *events, */ uint64_t rolling = qm_port->cq_rolling_mask & 0xF; uint64_t qe_xor_bits = (qe_gen_bits ^ rolling); - uint32_t count_new = __builtin_popcount(qe_xor_bits); + uint32_t count_new = rte_popcount32(qe_xor_bits); count_new = RTE_MIN(count_new, max_events); if (!count_new) return 0; @@ -4122,7 +4079,7 @@ dlb2_recv_qe(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe, /* Mask off gen bits we don't care about */ gen_bits &= and_mask[*offset]; - return __builtin_popcount(gen_bits); + return rte_popcount32(gen_bits); } static inline int16_t @@ -4621,6 +4578,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2->hw_credit_quanta = dlb2_args->hw_credit_quanta; dlb2->default_depth_thresh = dlb2_args->default_depth_thresh; dlb2->vector_opts_enabled = dlb2_args->vector_opts_enabled; + dlb2->enable_cq_weight = dlb2_args->enable_cq_weight; if (dlb2_args->max_cq_depth != 0) @@ -4641,9 +4599,6 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2_init_queue_depth_thresholds(dlb2, dlb2_args->qid_depth_thresholds.val); - dlb2_init_cq_weight(dlb2, - dlb2_args->cq_weight.limit); - dlb2_init_port_cos(dlb2, dlb2_args->port_cos.cos_id); @@ -4774,11 +4729,11 @@ dlb2_parse_params(const char *params, DLB2_VECTOR_OPTS_ENAB_ARG, DLB2_MAX_CQ_DEPTH, DLB2_MAX_ENQ_DEPTH, - DLB2_CQ_WEIGHT, DLB2_PORT_COS, DLB2_COS_BW, DLB2_PRODUCER_COREMASK, DLB2_DEFAULT_LDB_PORT_ALLOCATION_ARG, + DLB2_ENABLE_CQ_WEIGHT_ARG, NULL }; if (params != NULL && params[0] != '\0') { @@ -4926,17 +4881,6 @@ dlb2_parse_params(const char *params, return ret; } - ret = rte_kvargs_process(kvlist, - DLB2_CQ_WEIGHT, - set_cq_weight, - &dlb2_args->cq_weight); - if (ret != 0) { - DLB2_LOG_ERR("%s: Error parsing cq weight on", - name); - rte_kvargs_free(kvlist); - return ret; - } - ret = rte_kvargs_process(kvlist, DLB2_PORT_COS, set_port_cos, @@ -4983,6 +4927,17 @@ dlb2_parse_params(const char *params, return ret; } + ret = rte_kvargs_process(kvlist, + DLB2_ENABLE_CQ_WEIGHT_ARG, + set_enable_cq_weight, + &dlb2_args->enable_cq_weight); + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing enable_cq_weight arg", + name); + rte_kvargs_free(kvlist); + return ret; + } + rte_kvargs_free(kvlist); } } diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h index 
f4b9e7f9ffb..31a3beeb6c8 100644 --- a/drivers/event/dlb2/dlb2_priv.h +++ b/drivers/event/dlb2/dlb2_priv.h @@ -47,11 +47,11 @@ #define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable" #define DLB2_MAX_CQ_DEPTH "max_cq_depth" #define DLB2_MAX_ENQ_DEPTH "max_enqueue_depth" -#define DLB2_CQ_WEIGHT "cq_weight" #define DLB2_PORT_COS "port_cos" #define DLB2_COS_BW "cos_bw" #define DLB2_PRODUCER_COREMASK "producer_coremask" #define DLB2_DEFAULT_LDB_PORT_ALLOCATION_ARG "default_port_allocation" +#define DLB2_ENABLE_CQ_WEIGHT_ARG "enable_cq_weight" /* Begin HW related defines and structs */ @@ -637,6 +637,7 @@ struct dlb2_eventdev { uint32_t cos_ports[DLB2_COS_NUM_VALS]; /* total ldb ports in each class */ uint32_t cos_bw[DLB2_COS_NUM_VALS]; /* bandwidth per cos domain */ uint8_t max_cos_port; /* Max LDB port from any cos */ + bool enable_cq_weight; }; /* used for collecting and passing around the dev args */ @@ -674,6 +675,7 @@ struct dlb2_devargs { struct dlb2_cos_bw cos_bw; const char *producer_coremask; bool default_ldb_port_allocation; + bool enable_cq_weight; }; /* End Eventdev related defines and structs */ diff --git a/drivers/event/dlb2/pf/base/dlb2_osdep.h b/drivers/event/dlb2/pf/base/dlb2_osdep.h index cffe22f3c57..06d69f39b1e 100644 --- a/drivers/event/dlb2/pf/base/dlb2_osdep.h +++ b/drivers/event/dlb2/pf/base/dlb2_osdep.h @@ -8,7 +8,6 @@ #include #include #include -#include #include #include @@ -154,7 +153,7 @@ static inline void os_fence_hcw(struct dlb2_hw *hw, u64 *pp_addr) * map and unmap requests. To prevent deadlock, this function gives other * threads a chance to grab the resource mutex and configure hardware. */ -static void *dlb2_complete_queue_map_unmap(void *__args) +static uint32_t dlb2_complete_queue_map_unmap(void *__args) { struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)__args; int ret; @@ -180,7 +179,7 @@ static void *dlb2_complete_queue_map_unmap(void *__args) rte_spinlock_unlock(&dlb2_dev->resource_mutex); - return NULL; + return 0; } @@ -194,16 +193,13 @@ static void *dlb2_complete_queue_map_unmap(void *__args) static inline void os_schedule_work(struct dlb2_hw *hw) { struct dlb2_dev *dlb2_dev; - pthread_t complete_queue_map_unmap_thread; + rte_thread_t complete_queue_map_unmap_thread; int ret; dlb2_dev = container_of(hw, struct dlb2_dev, hw); - ret = rte_ctrl_thread_create(&complete_queue_map_unmap_thread, - "dlb_queue_unmap_waiter", - NULL, - dlb2_complete_queue_map_unmap, - dlb2_dev); + ret = rte_thread_create_internal_control(&complete_queue_map_unmap_thread, + "dlb-qunmap", dlb2_complete_queue_map_unmap, dlb2_dev); if (ret) DLB2_ERR(dlb2_dev, "Could not create queue complete map/unmap thread, err=%d\n", diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c index dd8390a0132..7ce3e3531c1 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.c +++ b/drivers/event/dlb2/pf/base/dlb2_resource.c @@ -759,18 +759,13 @@ static int dlb2_attach_ldb_queues(struct dlb2_hw *hw, } static int -dlb2_pp_profile(struct dlb2_hw *hw, int port, int cpu, bool is_ldb) +dlb2_pp_profile(struct dlb2_hw *hw, int port, bool is_ldb) { u64 cycle_start = 0ULL, cycle_end = 0ULL; struct dlb2_hcw hcw_mem[DLB2_HCW_MEM_SIZE], *hcw; void __iomem *pp_addr; - cpu_set_t cpuset; int i; - CPU_ZERO(&cpuset); - CPU_SET(cpu, &cpuset); - sched_setaffinity(0, sizeof(cpuset), &cpuset); - pp_addr = os_map_producer_port(hw, port, is_ldb); /* Point hcw to a 64B-aligned location */ @@ -797,18 +792,15 @@ dlb2_pp_profile(struct dlb2_hw *hw, int port, int cpu, bool is_ldb) 
return (int)(cycle_end - cycle_start); } -static void * +static uint32_t dlb2_pp_profile_func(void *data) { struct dlb2_pp_thread_data *thread_data = data; - int cycles; - - cycles = dlb2_pp_profile(thread_data->hw, thread_data->pp, - thread_data->cpu, thread_data->is_ldb); - thread_data->cycles = cycles; + thread_data->cycles = dlb2_pp_profile(thread_data->hw, + thread_data->pp, thread_data->is_ldb); - return NULL; + return 0; } static int dlb2_pp_cycle_comp(const void *a, const void *b) @@ -831,7 +823,9 @@ dlb2_get_pp_allocation(struct dlb2_hw *hw, int cpu, int port_type) int num_ports_per_sort, num_ports, num_sort, i, err; bool is_ldb = (port_type == DLB2_LDB_PORT); int *port_allocations; - pthread_t pthread; + rte_thread_t thread; + rte_thread_attr_t th_attr; + char th_name[RTE_THREAD_INTERNAL_NAME_SIZE]; if (is_ldb) { port_allocations = hw->ldb_pp_allocations; @@ -857,16 +851,25 @@ dlb2_get_pp_allocation(struct dlb2_hw *hw, int cpu, int port_type) dlb2_thread_data[i].pp = i; dlb2_thread_data[i].cycles = 0; dlb2_thread_data[i].hw = hw; - dlb2_thread_data[i].cpu = cpu; - err = pthread_create(&pthread, NULL, &dlb2_pp_profile_func, - &dlb2_thread_data[i]); + err = rte_thread_attr_init(&th_attr); + if (err != 0) { + DLB2_LOG_ERR(": thread attribute failed! err=%d", err); + return; + } + CPU_SET(cpu, &th_attr.cpuset); + + err = rte_thread_create(&thread, &th_attr, + &dlb2_pp_profile_func, &dlb2_thread_data[i]); if (err) { DLB2_LOG_ERR(": thread creation failed! err=%d", err); return; } - err = pthread_join(pthread, NULL); + snprintf(th_name, sizeof(th_name), "dlb2-pp%d", cpu); + rte_thread_set_prefixed_name(thread, th_name); + + err = rte_thread_join(thread, NULL); if (err) { DLB2_LOG_ERR(": thread join failed! err=%d", err); return; diff --git a/drivers/event/dlb2/pf/dlb2_main.c b/drivers/event/dlb2/pf/dlb2_main.c index 717aa4fc08a..aa03e4c3111 100644 --- a/drivers/event/dlb2/pf/dlb2_main.c +++ b/drivers/event/dlb2/pf/dlb2_main.c @@ -27,73 +27,6 @@ #define NO_OWNER_VF 0 /* PF ONLY! */ #define NOT_VF_REQ false /* PF ONLY! 
*/ -#define DLB2_PCI_CAP_POINTER 0x34 -#define DLB2_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC) -#define DLB2_PCI_CAP_ID(hdr) ((hdr) & 0xFF) - -#define DLB2_PCI_LNKCTL 16 -#define DLB2_PCI_SLTCTL 24 -#define DLB2_PCI_RTCTL 28 -#define DLB2_PCI_EXP_DEVCTL2 40 -#define DLB2_PCI_LNKCTL2 48 -#define DLB2_PCI_SLTCTL2 56 -#define DLB2_PCI_CMD 4 -#define DLB2_PCI_EXP_DEVSTA 10 -#define DLB2_PCI_EXP_DEVSTA_TRPND 0x20 -#define DLB2_PCI_EXP_DEVCTL_BCR_FLR 0x8000 - -#define DLB2_PCI_CAP_ID_EXP 0x10 -#define DLB2_PCI_CAP_ID_MSIX 0x11 -#define DLB2_PCI_EXT_CAP_ID_PRI 0x13 -#define DLB2_PCI_EXT_CAP_ID_ACS 0xD - -#define DLB2_PCI_PRI_CTRL_ENABLE 0x1 -#define DLB2_PCI_PRI_ALLOC_REQ 0xC -#define DLB2_PCI_PRI_CTRL 0x4 -#define DLB2_PCI_MSIX_FLAGS 0x2 -#define DLB2_PCI_MSIX_FLAGS_ENABLE 0x8000 -#define DLB2_PCI_MSIX_FLAGS_MASKALL 0x4000 -#define DLB2_PCI_ERR_ROOT_STATUS 0x30 -#define DLB2_PCI_ERR_COR_STATUS 0x10 -#define DLB2_PCI_ERR_UNCOR_STATUS 0x4 -#define DLB2_PCI_COMMAND_INTX_DISABLE 0x400 -#define DLB2_PCI_ACS_CAP 0x4 -#define DLB2_PCI_ACS_CTRL 0x6 -#define DLB2_PCI_ACS_SV 0x1 -#define DLB2_PCI_ACS_RR 0x4 -#define DLB2_PCI_ACS_CR 0x8 -#define DLB2_PCI_ACS_UF 0x10 -#define DLB2_PCI_ACS_EC 0x20 - -static int dlb2_pci_find_capability(struct rte_pci_device *pdev, uint32_t id) -{ - uint8_t pos; - int ret; - uint16_t hdr; - - ret = rte_pci_read_config(pdev, &pos, 1, DLB2_PCI_CAP_POINTER); - pos &= 0xFC; - - if (ret != 1) - return -1; - - while (pos > 0x3F) { - ret = rte_pci_read_config(pdev, &hdr, 2, pos); - if (ret != 2) - return -1; - - if (DLB2_PCI_CAP_ID(hdr) == id) - return pos; - - if (DLB2_PCI_CAP_ID(hdr) == 0xFF) - return -1; - - pos = DLB2_PCI_CAP_NEXT(hdr); - } - - return -1; -} - static int dlb2_pf_init_driver_state(struct dlb2_dev *dlb2_dev) { @@ -258,9 +191,9 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) uint32_t pri_reqs_dword; uint16_t pri_ctrl_word; - int pcie_cap_offset; + off_t pcie_cap_offset; int pri_cap_offset; - int msix_cap_offset; + off_t msix_cap_offset; int err_cap_offset; int acs_cap_offset; int wait_count; @@ -277,7 +210,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) return ret; } - pcie_cap_offset = dlb2_pci_find_capability(pdev, DLB2_PCI_CAP_ID_EXP); + pcie_cap_offset = rte_pci_find_capability(pdev, RTE_PCI_CAP_ID_EXP); if (pcie_cap_offset < 0) { DLB2_LOG_ERR("[%s()] failed to find the pcie capability\n", @@ -289,42 +222,42 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2) dev_ctl_word = 0; - off = pcie_cap_offset + DLB2_PCI_LNKCTL; + off = pcie_cap_offset + RTE_PCI_EXP_LNKCTL; if (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2) lnk_word = 0; - off = pcie_cap_offset + DLB2_PCI_SLTCTL; + off = pcie_cap_offset + RTE_PCI_EXP_SLTCTL; if (rte_pci_read_config(pdev, &slt_word, 2, off) != 2) slt_word = 0; - off = pcie_cap_offset + DLB2_PCI_RTCTL; + off = pcie_cap_offset + RTE_PCI_EXP_RTCTL; if (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2) rt_ctl_word = 0; - off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL2; + off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL2; if (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2) dev_ctl2_word = 0; - off = pcie_cap_offset + DLB2_PCI_LNKCTL2; + off = pcie_cap_offset + RTE_PCI_EXP_LNKCTL2; if (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2) lnk_word2 = 0; - off = pcie_cap_offset + DLB2_PCI_SLTCTL2; + off = pcie_cap_offset + RTE_PCI_EXP_SLTCTL2; if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2) slt_word2 = 0; - off = DLB2_PCI_EXT_CAP_ID_PRI; + off = RTE_PCI_EXT_CAP_ID_PRI; pri_cap_offset = 
rte_pci_find_ext_capability(pdev, off); if (pri_cap_offset >= 0) { - off = pri_cap_offset + DLB2_PCI_PRI_ALLOC_REQ; + off = pri_cap_offset + RTE_PCI_PRI_ALLOC_REQ; if (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4) pri_reqs_dword = 0; } /* clear the PCI command register before issuing the FLR */ - off = DLB2_PCI_CMD; + off = RTE_PCI_COMMAND; cmd = 0; if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) { DLB2_LOG_ERR("[%s()] failed to write the pci command\n", @@ -336,7 +269,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) for (wait_count = 0; wait_count < 4; wait_count++) { int sleep_time; - off = pcie_cap_offset + DLB2_PCI_EXP_DEVSTA; + off = pcie_cap_offset + RTE_PCI_EXP_DEVSTA; ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off); if (ret != 2) { DLB2_LOG_ERR("[%s()] failed to read the pci device status\n", @@ -344,7 +277,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) return ret; } - if (!(devsta_busy_word & DLB2_PCI_EXP_DEVSTA_TRPND)) + if (!(devsta_busy_word & RTE_PCI_EXP_DEVSTA_TRPND)) break; sleep_time = (1 << (wait_count)) * 100; @@ -365,7 +298,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) return ret; } - devctl_word |= DLB2_PCI_EXP_DEVCTL_BCR_FLR; + devctl_word |= RTE_PCI_EXP_DEVCTL_BCR_FLR; ret = rte_pci_write_config(pdev, &devctl_word, 2, off); if (ret != 2) { @@ -387,7 +320,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) return ret; } - off = pcie_cap_offset + DLB2_PCI_LNKCTL; + off = pcie_cap_offset + RTE_PCI_EXP_LNKCTL; ret = rte_pci_write_config(pdev, &lnk_word, 2, off); if (ret != 2) { DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", @@ -395,7 +328,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) return ret; } - off = pcie_cap_offset + DLB2_PCI_SLTCTL; + off = pcie_cap_offset + RTE_PCI_EXP_SLTCTL; ret = rte_pci_write_config(pdev, &slt_word, 2, off); if (ret != 2) { DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", @@ -403,7 +336,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) return ret; } - off = pcie_cap_offset + DLB2_PCI_RTCTL; + off = pcie_cap_offset + RTE_PCI_EXP_RTCTL; ret = rte_pci_write_config(pdev, &rt_ctl_word, 2, off); if (ret != 2) { DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", @@ -411,7 +344,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) return ret; } - off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL2; + off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL2; ret = rte_pci_write_config(pdev, &dev_ctl2_word, 2, off); if (ret != 2) { DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", @@ -419,7 +352,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) return ret; } - off = pcie_cap_offset + DLB2_PCI_LNKCTL2; + off = pcie_cap_offset + RTE_PCI_EXP_LNKCTL2; ret = rte_pci_write_config(pdev, &lnk_word2, 2, off); if (ret != 2) { DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", @@ -427,7 +360,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) return ret; } - off = pcie_cap_offset + DLB2_PCI_SLTCTL2; + off = pcie_cap_offset + RTE_PCI_EXP_SLTCTL2; ret = rte_pci_write_config(pdev, &slt_word2, 2, off); if (ret != 2) { DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", @@ -437,9 +370,9 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) } if (pri_cap_offset >= 0) { - pri_ctrl_word = DLB2_PCI_PRI_CTRL_ENABLE; + pri_ctrl_word = RTE_PCI_PRI_CTRL_ENABLE; - off = pri_cap_offset + DLB2_PCI_PRI_ALLOC_REQ; + off = pri_cap_offset + RTE_PCI_PRI_ALLOC_REQ; ret = rte_pci_write_config(pdev, &pri_reqs_dword, 4, off); if (ret != 4) { DLB2_LOG_ERR("[%s()] failed to 
write the pcie config space at offset %d\n", @@ -447,7 +380,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) return ret; } - off = pri_cap_offset + DLB2_PCI_PRI_CTRL; + off = pri_cap_offset + RTE_PCI_PRI_CTRL; ret = rte_pci_write_config(pdev, &pri_ctrl_word, 2, off); if (ret != 2) { DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", @@ -462,7 +395,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) if (err_cap_offset >= 0) { uint32_t tmp; - off = err_cap_offset + DLB2_PCI_ERR_ROOT_STATUS; + off = err_cap_offset + RTE_PCI_ERR_ROOT_STATUS; if (rte_pci_read_config(pdev, &tmp, 4, off) != 4) tmp = 0; @@ -473,7 +406,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) return ret; } - off = err_cap_offset + DLB2_PCI_ERR_COR_STATUS; + off = err_cap_offset + RTE_PCI_ERR_COR_STATUS; if (rte_pci_read_config(pdev, &tmp, 4, off) != 4) tmp = 0; @@ -484,7 +417,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) return ret; } - off = err_cap_offset + DLB2_PCI_ERR_UNCOR_STATUS; + off = err_cap_offset + RTE_PCI_ERR_UNCOR_STATUS; if (rte_pci_read_config(pdev, &tmp, 4, off) != 4) tmp = 0; @@ -506,9 +439,9 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) } } - off = DLB2_PCI_CMD; + off = RTE_PCI_COMMAND; if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) { - cmd &= ~DLB2_PCI_COMMAND_INTX_DISABLE; + cmd &= ~RTE_PCI_COMMAND_INTX_DISABLE; if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) { DLB2_LOG_ERR("[%s()] failed to write the pci command\n", __func__); @@ -516,13 +449,12 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) } } - msix_cap_offset = dlb2_pci_find_capability(pdev, - DLB2_PCI_CAP_ID_MSIX); + msix_cap_offset = rte_pci_find_capability(pdev, RTE_PCI_CAP_ID_MSIX); if (msix_cap_offset >= 0) { - off = msix_cap_offset + DLB2_PCI_MSIX_FLAGS; + off = msix_cap_offset + RTE_PCI_MSIX_FLAGS; if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) { - cmd |= DLB2_PCI_MSIX_FLAGS_ENABLE; - cmd |= DLB2_PCI_MSIX_FLAGS_MASKALL; + cmd |= RTE_PCI_MSIX_FLAGS_ENABLE; + cmd |= RTE_PCI_MSIX_FLAGS_MASKALL; if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) { DLB2_LOG_ERR("[%s()] failed to write msix flags\n", __func__); @@ -530,9 +462,9 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) } } - off = msix_cap_offset + DLB2_PCI_MSIX_FLAGS; + off = msix_cap_offset + RTE_PCI_MSIX_FLAGS; if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) { - cmd &= ~DLB2_PCI_MSIX_FLAGS_MASKALL; + cmd &= ~RTE_PCI_MSIX_FLAGS_MASKALL; if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) { DLB2_LOG_ERR("[%s()] failed to write msix flags\n", __func__); @@ -541,21 +473,21 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) } } - off = DLB2_PCI_EXT_CAP_ID_ACS; + off = RTE_PCI_EXT_CAP_ID_ACS; acs_cap_offset = rte_pci_find_ext_capability(pdev, off); if (acs_cap_offset >= 0) { uint16_t acs_cap, acs_ctrl, acs_mask; - off = acs_cap_offset + DLB2_PCI_ACS_CAP; + off = acs_cap_offset + RTE_PCI_ACS_CAP; if (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2) acs_cap = 0; - off = acs_cap_offset + DLB2_PCI_ACS_CTRL; + off = acs_cap_offset + RTE_PCI_ACS_CTRL; if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2) acs_ctrl = 0; - acs_mask = DLB2_PCI_ACS_SV | DLB2_PCI_ACS_RR; - acs_mask |= (DLB2_PCI_ACS_CR | DLB2_PCI_ACS_UF); + acs_mask = RTE_PCI_ACS_SV | RTE_PCI_ACS_RR; + acs_mask |= (RTE_PCI_ACS_CR | RTE_PCI_ACS_UF); acs_ctrl |= (acs_cap & acs_mask); ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off); @@ -565,15 +497,15 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) return ret; } - off = acs_cap_offset + DLB2_PCI_ACS_CTRL; + off = acs_cap_offset + RTE_PCI_ACS_CTRL; if (rte_pci_read_config(pdev, 
&acs_ctrl, 2, off) != 2) acs_ctrl = 0; - acs_mask = DLB2_PCI_ACS_RR | DLB2_PCI_ACS_CR; - acs_mask |= DLB2_PCI_ACS_EC; + acs_mask = RTE_PCI_ACS_RR | RTE_PCI_ACS_CR; + acs_mask |= RTE_PCI_ACS_EC; acs_ctrl &= ~acs_mask; - off = acs_cap_offset + DLB2_PCI_ACS_CTRL; + off = acs_cap_offset + RTE_PCI_ACS_CTRL; ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off); if (ret != 2) { DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", diff --git a/drivers/event/dlb2/pf/dlb2_main.h b/drivers/event/dlb2/pf/dlb2_main.h index 4c64d72e9c7..12912a2dec7 100644 --- a/drivers/event/dlb2/pf/dlb2_main.h +++ b/drivers/event/dlb2/pf/dlb2_main.h @@ -52,7 +52,6 @@ struct dlb2_dev { struct dlb2_pp_thread_data { struct dlb2_hw *hw; int pp; - int cpu; bool is_ldb; int cycles; }; diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c index 4b3d16735b9..f615da38132 100644 --- a/drivers/event/dpaa/dpaa_eventdev.c +++ b/drivers/event/dpaa/dpaa_eventdev.c @@ -359,6 +359,7 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev, RTE_EVENT_DEV_CAP_NONSEQ_MODE | RTE_EVENT_DEV_CAP_CARRY_FLOW_ID | RTE_EVENT_DEV_CAP_MAINTENANCE_FREE; + dev_info->max_profiles_per_port = 1; } static int diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c index fa1a1ade805..ffc5550f850 100644 --- a/drivers/event/dpaa2/dpaa2_eventdev.c +++ b/drivers/event/dpaa2/dpaa2_eventdev.c @@ -411,7 +411,7 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev, RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES | RTE_EVENT_DEV_CAP_CARRY_FLOW_ID | RTE_EVENT_DEV_CAP_MAINTENANCE_FREE; - + dev_info->max_profiles_per_port = 1; } static int diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c index 6c5cde24681..785c12f61fb 100644 --- a/drivers/event/dsw/dsw_evdev.c +++ b/drivers/event/dsw/dsw_evdev.c @@ -218,6 +218,7 @@ dsw_info_get(struct rte_eventdev *dev __rte_unused, .max_event_port_dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH, .max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH, .max_num_events = DSW_MAX_EVENTS, + .max_profiles_per_port = 1, .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE| RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED| RTE_EVENT_DEV_CAP_NONSEQ_MODE| diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c index 650266b9966..0eb93589810 100644 --- a/drivers/event/octeontx/ssovf_evdev.c +++ b/drivers/event/octeontx/ssovf_evdev.c @@ -158,7 +158,7 @@ ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info) RTE_EVENT_DEV_CAP_NONSEQ_MODE | RTE_EVENT_DEV_CAP_CARRY_FLOW_ID | RTE_EVENT_DEV_CAP_MAINTENANCE_FREE; - + dev_info->max_profiles_per_port = 1; } static int diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c index 9ce8b39b609..dd257496541 100644 --- a/drivers/event/opdl/opdl_evdev.c +++ b/drivers/event/opdl/opdl_evdev.c @@ -378,6 +378,7 @@ opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info) .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE | RTE_EVENT_DEV_CAP_CARRY_FLOW_ID | RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, + .max_profiles_per_port = 1, }; *info = evdev_opdl_info; diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c index 8513b9a013e..dc9b1316419 100644 --- a/drivers/event/skeleton/skeleton_eventdev.c +++ b/drivers/event/skeleton/skeleton_eventdev.c @@ -104,6 +104,7 @@ skeleton_eventdev_info_get(struct rte_eventdev *dev, RTE_EVENT_DEV_CAP_EVENT_QOS | RTE_EVENT_DEV_CAP_CARRY_FLOW_ID | RTE_EVENT_DEV_CAP_MAINTENANCE_FREE; + 
dev_info->max_profiles_per_port = 1; } static int diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c index cfd659d7748..6d1816b76d7 100644 --- a/drivers/event/sw/sw_evdev.c +++ b/drivers/event/sw/sw_evdev.c @@ -609,6 +609,7 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info) RTE_EVENT_DEV_CAP_NONSEQ_MODE | RTE_EVENT_DEV_CAP_CARRY_FLOW_ID | RTE_EVENT_DEV_CAP_MAINTENANCE_FREE), + .max_profiles_per_port = 1, }; *info = evdev_sw_info; diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c index 8bc21944f58..a5fdcf301b5 100644 --- a/drivers/event/sw/sw_evdev_scheduler.c +++ b/drivers/event/sw/sw_evdev_scheduler.c @@ -15,7 +15,7 @@ * CLZ twice is faster than caching the value due to data dependencies */ #define PKT_MASK_TO_IQ(pkts) \ - (__builtin_ctz(pkts | (1 << SW_IQS_MAX))) + (rte_ctz32(pkts | (1 << SW_IQS_MAX))) #if SW_IQS_MAX != 4 #error Misconfigured PRIO_TO_IQ caused by SW_IQS_MAX value change @@ -90,8 +90,10 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid, sw->cq_ring_space[cq]--; int head = (p->hist_head++ & (SW_PORT_HIST_LIST-1)); - p->hist_list[head].fid = flow_id; - p->hist_list[head].qid = qid_id; + p->hist_list[head] = (struct sw_hist_list_entry) { + .qid = qid_id, + .fid = flow_id, + }; p->stats.tx_pkts++; qid->stats.tx_pkts++; @@ -162,8 +164,10 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid, qid->stats.tx_pkts++; const int head = (p->hist_head & (SW_PORT_HIST_LIST-1)); - p->hist_list[head].fid = SW_HASH_FLOWID(qe->flow_id); - p->hist_list[head].qid = qid_id; + p->hist_list[head] = (struct sw_hist_list_entry) { + .qid = qid_id, + .fid = SW_HASH_FLOWID(qe->flow_id), + }; if (keep_order) rob_ring_dequeue(qid->reorder_buffer_freelist, @@ -368,12 +372,6 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder) if (!allow_reorder && !eop) flags = QE_FLAG_VALID; - /* - * if we don't have space for this packet in an IQ, - * then move on to next queue. Technically, for a - * packet that needs reordering, we don't need to check - * here, but it simplifies things not to special-case - */ uint32_t iq_num = PRIO_TO_IQ(qe->priority); struct sw_qid *qid = &sw->qids[qe->queue_id]; @@ -419,7 +417,6 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder) struct reorder_buffer_entry *rob_entry = hist_entry->rob_entry; - hist_entry->rob_entry = NULL; /* Although fragmentation not currently * supported by eventdev API, we support it * here. 
Open: How do we alert the user that diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c index 3aa8d76ca82..e6646e56698 100644 --- a/drivers/event/sw/sw_evdev_selftest.c +++ b/drivers/event/sw/sw_evdev_selftest.c @@ -28,7 +28,7 @@ #define MAX_PORTS 16 #define MAX_QIDS 16 -#define NUM_PACKETS (1<<18) +#define NUM_PACKETS (1 << 17) #define DEQUEUE_DEPTH 128 static int evdev; @@ -2959,6 +2959,132 @@ dev_stop_flush(struct test *t) /* test to check we can properly flush events */ return -1; } +static int +ordered_atomic_hist_completion(struct test *t) +{ + const int rx_enq = 0; + int err; + + /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */ + if (init(t, 2, 2) < 0 || + create_ports(t, 2) < 0 || + create_ordered_qids(t, 1) < 0 || + create_atomic_qids(t, 1) < 0) + return -1; + + /* Helpers to identify queues */ + const uint8_t qid_ordered = t->qid[0]; + const uint8_t qid_atomic = t->qid[1]; + + /* CQ mapping to QID */ + if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) { + printf("%d: error mapping port 1 qid\n", __LINE__); + return -1; + } + if (rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1) != 1) { + printf("%d: error mapping port 1 qid\n", __LINE__); + return -1; + } + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /* Enqueue 1x ordered event, to be RELEASE-ed by the worker + * CPU, which may cause hist-list corruption (by not completing) + */ + struct rte_event ord_ev = { + .op = RTE_EVENT_OP_NEW, + .queue_id = qid_ordered, + .event_type = RTE_EVENT_TYPE_CPU, + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + }; + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ord_ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + + /* call the scheduler. This schedules the above event as a single + * event in an ORDERED queue, to the worker. + */ + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + /* Dequeue ORDERED event 0 from port 1, so that we can then drop */ + struct rte_event ev; + if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) { + printf("%d: failed to dequeue\n", __LINE__); + return -1; + } + + /* drop the ORDERED event. Here the history list should be completed, + * but might not be if the hist-list bug exists. Call scheduler to make + * it act on the RELEASE that was enqueued. + */ + rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1); + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + /* Enqueue 1x atomic event, to then FORWARD to trigger atomic hist-list + * completion. If the bug exists, the ORDERED entry may be completed in + * error (aka, using the ORDERED-ROB for the ATOMIC event). This is the + * main focus of this unit test. + */ + { + struct rte_event ev = { + .op = RTE_EVENT_OP_NEW, + .queue_id = qid_atomic, + .event_type = RTE_EVENT_TYPE_CPU, + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .flow_id = 123, + }; + + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + } + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + /* Deq ATM event, then forward it for more than HIST_LIST_SIZE times, + * to re-use the history list entry that may be corrupted previously. 
+ */ + for (int i = 0; i < SW_PORT_HIST_LIST + 2; i++) { + if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) { + printf("%d: failed to dequeue, did corrupt ORD hist " + "list steal this ATM event?\n", __LINE__); + return -1; + } + + /* Re-enqueue the ATM event as FWD, trigger hist-list. */ + ev.op = RTE_EVENT_OP_FORWARD; + err = rte_event_enqueue_burst(evdev, t->port[1], &ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + + rte_service_run_iter_on_app_lcore(t->service_id, 1); + } + + /* If HIST-LIST + N count of dequeues succeed above, the hist list + * has not been corrupted. If it is corrupted, the ATM event is pushed + * into the ORDERED-ROB and will not dequeue. + */ + + /* release the ATM event that's been forwarded HIST_LIST times */ + err = rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + cleanup(t); + return 0; +} + static int worker_loopback_worker_fn(void *arg) { @@ -3388,6 +3514,12 @@ test_sw_eventdev(void) printf("ERROR - Stop Flush test FAILED.\n"); goto test_fail; } + printf("*** Running Ordered & Atomic hist-list completion test...\n"); + ret = ordered_atomic_hist_completion(t); + if (ret != 0) { + printf("ERROR - Ordered & Atomic hist-list test FAILED.\n"); + goto test_fail; + } if (rte_lcore_count() >= 3) { printf("*** Running Worker loopback test...\n"); ret = worker_loopback(t, 0); diff --git a/drivers/gpu/cuda/gdrcopy.c b/drivers/gpu/cuda/gdrcopy.c index 322a5dbeb20..bd56b73ce4e 100644 --- a/drivers/gpu/cuda/gdrcopy.c +++ b/drivers/gpu/cuda/gdrcopy.c @@ -6,6 +6,8 @@ #ifdef DRIVERS_GPU_CUDA_GDRCOPY_H +#include + static void *gdrclib; static gdr_t (*sym_gdr_open)(void); static int (*sym_gdr_pin_buffer)(gdr_t g, unsigned long addr, size_t size, diff --git a/drivers/gpu/cuda/meson.build b/drivers/gpu/cuda/meson.build index 784fa8bf0da..d54473f1487 100644 --- a/drivers/gpu/cuda/meson.build +++ b/drivers/gpu/cuda/meson.build @@ -21,5 +21,5 @@ if cc.has_header('gdrapi.h') dpdk_conf.set('DRIVERS_GPU_CUDA_GDRCOPY_H', 1) endif -deps += ['gpudev', 'pci', 'bus_pci'] +deps += ['pci', 'bus_pci'] sources = files('cuda.c', 'gdrcopy.c') diff --git a/drivers/gpu/meson.build b/drivers/gpu/meson.build index 601bedcd611..b6edd12678d 100644 --- a/drivers/gpu/meson.build +++ b/drivers/gpu/meson.build @@ -2,3 +2,5 @@ # Copyright (c) 2021 NVIDIA Corporation & Affiliates drivers = [ 'cuda' ] + +std_deps = [ 'gpudev' ] diff --git a/drivers/mempool/cnxk/cn10k_mempool_ops.c b/drivers/mempool/cnxk/cn10k_mempool_ops.c index ff0015d8ded..2a5aad00086 100644 --- a/drivers/mempool/cnxk/cn10k_mempool_ops.c +++ b/drivers/mempool/cnxk/cn10k_mempool_ops.c @@ -10,6 +10,7 @@ #define BATCH_ALLOC_SZ ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS #define BATCH_OP_DATA_TABLE_MZ_NAME "batch_op_data_table_mz" #define BATCH_ALLOC_WAIT_US 5 +#define BATCH_ALLOC_RETRIES 4 enum batch_op_status { BATCH_ALLOC_OP_NOT_ISSUED = 0, @@ -25,6 +26,7 @@ struct batch_op_mem { struct batch_op_data { uint64_t lmt_addr; + uint32_t max_async_batch; struct batch_op_mem mem[RTE_MAX_LCORE] __rte_aligned(ROC_ALIGN); }; @@ -97,6 +99,10 @@ batch_op_init(struct rte_mempool *mp) } op_data->lmt_addr = roc_idev_lmt_base_addr_get(); + op_data->max_async_batch = + RTE_MIN((unsigned int)BATCH_ALLOC_SZ, + RTE_ALIGN_CEIL(mp->cache_size, ROC_ALIGN / 8)); + batch_op_data_set(mp->pool_id, op_data); rte_wmb(); @@ -117,13 +123,17 @@ batch_op_fini(struct rte_mempool 
*mp) return; } + /* If max_async_batch == 0, then batch mem will be empty */ + if (op_data->max_async_batch == 0) + goto free_op_data; + rte_wmb(); for (i = 0; i < RTE_MAX_LCORE; i++) { struct batch_op_mem *mem = &op_data->mem[i]; if (mem->status == BATCH_ALLOC_OP_ISSUED) { mem->sz = roc_npa_aura_batch_alloc_extract( - mem->objs, mem->objs, BATCH_ALLOC_SZ); + mem->objs, mem->objs, op_data->max_async_batch); mem->status = BATCH_ALLOC_OP_DONE; } if (mem->status == BATCH_ALLOC_OP_DONE) { @@ -133,6 +143,7 @@ batch_op_fini(struct rte_mempool *mp) } } +free_op_data: rte_free(op_data); batch_op_data_set(mp->pool_id, NULL); rte_wmb(); @@ -151,6 +162,12 @@ cn10k_mempool_enq(struct rte_mempool *mp, void *const *obj_table, */ rte_io_wmb(); + /* For non-EAL threads, rte_lcore_id() will not be valid. Hence + * fallback to bulk alloc + */ + if (unlikely(rte_lcore_id() == LCORE_ID_ANY)) + return cnxk_mempool_enq(mp, obj_table, n); + if (n == 1) { roc_npa_aura_op_free(mp->pool_id, 1, ptr[0]); return 0; @@ -172,6 +189,9 @@ cn10k_mempool_get_count(const struct rte_mempool *mp) int i; op_data = batch_op_data_get(mp->pool_id); + /* If max_async_batch == 0, then batch alloc mem will be empty */ + if (op_data->max_async_batch == 0) + goto npa_pool_count; rte_wmb(); for (i = 0; i < RTE_MAX_LCORE; i++) { @@ -179,19 +199,27 @@ cn10k_mempool_get_count(const struct rte_mempool *mp) if (mem->status == BATCH_ALLOC_OP_ISSUED) count += roc_npa_aura_batch_alloc_count( - mem->objs, BATCH_ALLOC_SZ, BATCH_ALLOC_WAIT_US); + mem->objs, op_data->max_async_batch, + BATCH_ALLOC_WAIT_US); if (mem->status == BATCH_ALLOC_OP_DONE) count += mem->sz; } +npa_pool_count: count += cnxk_mempool_get_count(mp); return count; } -static int __rte_hot -cn10k_mempool_deq(struct rte_mempool *mp, void **obj_table, unsigned int n) +static inline unsigned int __rte_hot +mempool_deq(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + return cnxk_mempool_deq(mp, obj_table, n) ? 
0 : n; +} + +static inline unsigned int __rte_hot +mempool_deq_batch_async(struct rte_mempool *mp, void **obj_table, unsigned int n) { struct batch_op_data *op_data; struct batch_op_mem *mem; @@ -205,24 +233,24 @@ cn10k_mempool_deq(struct rte_mempool *mp, void **obj_table, unsigned int n) /* Issue batch alloc */ if (mem->status == BATCH_ALLOC_OP_NOT_ISSUED) { - rc = roc_npa_aura_batch_alloc_issue(mp->pool_id, mem->objs, - BATCH_ALLOC_SZ, 0, 1); + rc = roc_npa_aura_batch_alloc_issue( + mp->pool_id, mem->objs, op_data->max_async_batch, 0, 1); /* If issue fails, try falling back to default alloc */ if (unlikely(rc)) - return cnxk_mempool_deq(mp, obj_table, n); + return mempool_deq(mp, obj_table, n); mem->status = BATCH_ALLOC_OP_ISSUED; } - retry = 4; + retry = BATCH_ALLOC_RETRIES; while (loop) { unsigned int cur_sz; if (mem->status == BATCH_ALLOC_OP_ISSUED) { mem->sz = roc_npa_aura_batch_alloc_extract( - mem->objs, mem->objs, BATCH_ALLOC_SZ); + mem->objs, mem->objs, op_data->max_async_batch); /* If partial alloc reduce the retry count */ - retry -= (mem->sz != BATCH_ALLOC_SZ); + retry -= (mem->sz != op_data->max_async_batch); /* Break the loop if retry count exhausted */ loop = !!retry; mem->status = BATCH_ALLOC_OP_DONE; @@ -244,13 +272,78 @@ cn10k_mempool_deq(struct rte_mempool *mp, void **obj_table, unsigned int n) /* Issue next batch alloc if pointers are exhausted */ if (mem->sz == 0) { rc = roc_npa_aura_batch_alloc_issue( - mp->pool_id, mem->objs, BATCH_ALLOC_SZ, 0, 1); + mp->pool_id, mem->objs, + op_data->max_async_batch, 0, 1); /* Break loop if issue failed and set status */ loop &= !rc; mem->status = !rc; } } + return count; +} + +static inline unsigned int __rte_hot +mempool_deq_batch_sync(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + struct batch_op_data *op_data; + struct batch_op_mem *mem; + unsigned int count = 0; + int tid, retry, rc; + + op_data = batch_op_data_get(mp->pool_id); + tid = rte_lcore_id(); + mem = &op_data->mem[tid]; + + retry = BATCH_ALLOC_RETRIES; + while (count != n && retry) { + unsigned int cur_sz, batch_sz; + + cur_sz = n - count; + batch_sz = RTE_MIN(BATCH_ALLOC_SZ, (int)cur_sz); + + /* Issue batch alloc */ + rc = roc_npa_aura_batch_alloc_issue(mp->pool_id, mem->objs, + batch_sz, 0, 1); + + /* If issue fails, try falling back to default alloc */ + if (unlikely(rc)) + return count + + mempool_deq(mp, obj_table + count, n - count); + + cur_sz = roc_npa_aura_batch_alloc_extract(mem->objs, mem->objs, + batch_sz); + + /* Dequeue the pointers */ + memcpy(&obj_table[count], mem->objs, + cur_sz * sizeof(uintptr_t)); + count += cur_sz; + + /* If partial alloc reduce the retry count */ + retry -= (batch_sz != cur_sz); + } + + return count; +} + +static int __rte_hot +cn10k_mempool_deq(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + struct batch_op_data *op_data; + unsigned int count = 0; + + /* For non-EAL threads, rte_lcore_id() will not be valid. Hence + * fallback to bulk alloc + */ + if (unlikely(rte_lcore_id() == LCORE_ID_ANY)) + return cnxk_mempool_deq(mp, obj_table, n); + + op_data = batch_op_data_get(mp->pool_id); + if (op_data->max_async_batch) + count = mempool_deq_batch_async(mp, obj_table, n); + else + count = mempool_deq_batch_sync(mp, obj_table, n); + if (unlikely(count != n)) { /* No partial alloc allowed. 
Free up allocated pointers */ cn10k_mempool_enq(mp, obj_table, count); diff --git a/drivers/mempool/cnxk/cnxk_mempool.c b/drivers/mempool/cnxk/cnxk_mempool.c index 78caf987d07..1181b6f2650 100644 --- a/drivers/mempool/cnxk/cnxk_mempool.c +++ b/drivers/mempool/cnxk/cnxk_mempool.c @@ -164,6 +164,7 @@ static const struct rte_pci_id npa_pci_map[] = { CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_NPA_PF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_NPA_PF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_NPA_PF), + CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_NPA_PF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KB, PCI_DEVID_CNXK_RVU_NPA_PF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_NPA_PF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_NPA_PF), @@ -174,6 +175,7 @@ static const struct rte_pci_id npa_pci_map[] = { CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_NPA_VF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_NPA_VF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_NPA_VF), + CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_NPA_VF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KB, PCI_DEVID_CNXK_RVU_NPA_VF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_NPA_VF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_NPA_VF), diff --git a/drivers/meson.build b/drivers/meson.build index c909070c30c..8c775bbe62e 100644 --- a/drivers/meson.build +++ b/drivers/meson.build @@ -1,6 +1,10 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017-2019 Intel Corporation +if is_ms_compiler + subdir_done() +endif + fs = import('fs') # Defines the order of dependencies evaluation @@ -37,7 +41,9 @@ disable_drivers = run_command(list_dir_globs, disable_drivers, check: true).stdo # add cmdline enabled drivers and meson enabled drivers together enable_drivers = ',' + get_option('enable_drivers') enable_drivers = run_command(list_dir_globs, enable_drivers, check: true).stdout().split() +require_drivers = true if enable_drivers.length() == 0 + require_drivers = false enable_drivers = run_command(list_dir_globs, '*/*', check: true).stdout().split() endif @@ -151,6 +157,12 @@ foreach subpath:subdirs build = false reason = 'requires IOVA in mbuf (set enable_iova_as_pa option)' endif + # error out if we can't build a driver and that driver was explicitly requested, + # i.e. not via wildcard. + if not build and require_drivers and get_option('enable_drivers').contains(drv_path) + error('Cannot build explicitly requested driver "@0@".\n'.format(drv_path) + +'\tReason: ' + reason) + endif # get dependency objs from strings shared_deps = ext_deps @@ -167,6 +179,12 @@ foreach subpath:subdirs endif message('Disabling @1@ [@2@]: missing internal dependency "@0@"' .format(d, name, 'drivers/' + drv_path)) + # error out if we can't build a driver and that driver was explicitly + # requested, i.e. not via wildcard. 
+ if require_drivers and get_option('enable_drivers').contains(drv_path) + error('Cannot build explicitly requested driver "@0@".\n'.format(drv_path) + +'\tPlease enable missing dependency "@0@"'.format(d)) + endif else shared_deps += [get_variable('shared_rte_' + d)] static_deps += [get_variable('static_rte_' + d)] diff --git a/drivers/ml/cnxk/cn10k_ml_dev.h b/drivers/ml/cnxk/cn10k_ml_dev.h index 6ca0b0bb6e2..c73bf7d001a 100644 --- a/drivers/ml/cnxk/cn10k_ml_dev.h +++ b/drivers/ml/cnxk/cn10k_ml_dev.h @@ -30,6 +30,9 @@ /* Maximum number of descriptors per queue-pair */ #define ML_CN10K_MAX_DESC_PER_QP 1024 +/* Maximum number of inputs / outputs per model */ +#define ML_CN10K_MAX_INPUT_OUTPUT 32 + /* Maximum number of segments for IO data */ #define ML_CN10K_MAX_SEGMENTS 1 diff --git a/drivers/ml/cnxk/cn10k_ml_model.c b/drivers/ml/cnxk/cn10k_ml_model.c index 92c47d39baf..e0b750cd8ef 100644 --- a/drivers/ml/cnxk/cn10k_ml_model.c +++ b/drivers/ml/cnxk/cn10k_ml_model.c @@ -366,6 +366,12 @@ cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer, uint8_ addr->total_input_sz_q = 0; for (i = 0; i < metadata->model.num_input; i++) { if (i < MRVL_ML_NUM_INPUT_OUTPUT_1) { + addr->input[i].nb_dims = 4; + addr->input[i].shape[0] = metadata->input1[i].shape.w; + addr->input[i].shape[1] = metadata->input1[i].shape.x; + addr->input[i].shape[2] = metadata->input1[i].shape.y; + addr->input[i].shape[3] = metadata->input1[i].shape.z; + addr->input[i].nb_elements = metadata->input1[i].shape.w * metadata->input1[i].shape.x * metadata->input1[i].shape.y * metadata->input1[i].shape.z; @@ -386,6 +392,13 @@ cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer, uint8_ addr->input[i].sz_q); } else { j = i - MRVL_ML_NUM_INPUT_OUTPUT_1; + + addr->input[i].nb_dims = 4; + addr->input[i].shape[0] = metadata->input2[j].shape.w; + addr->input[i].shape[1] = metadata->input2[j].shape.x; + addr->input[i].shape[2] = metadata->input2[j].shape.y; + addr->input[i].shape[3] = metadata->input2[j].shape.z; + addr->input[i].nb_elements = metadata->input2[j].shape.w * metadata->input2[j].shape.x * metadata->input2[j].shape.y * metadata->input2[j].shape.z; @@ -412,6 +425,8 @@ cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer, uint8_ addr->total_output_sz_d = 0; for (i = 0; i < metadata->model.num_output; i++) { if (i < MRVL_ML_NUM_INPUT_OUTPUT_1) { + addr->output[i].nb_dims = 1; + addr->output[i].shape[0] = metadata->output1[i].size; addr->output[i].nb_elements = metadata->output1[i].size; addr->output[i].sz_d = addr->output[i].nb_elements * @@ -426,6 +441,9 @@ cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer, uint8_ model->model_id, i, addr->output[i].sz_d, addr->output[i].sz_q); } else { j = i - MRVL_ML_NUM_INPUT_OUTPUT_1; + + addr->output[i].nb_dims = 1; + addr->output[i].shape[0] = metadata->output2[j].size; addr->output[i].nb_elements = metadata->output2[j].size; addr->output[i].sz_d = addr->output[i].nb_elements * @@ -498,16 +516,20 @@ void cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model) { struct cn10k_ml_model_metadata *metadata; + struct cn10k_ml_model_addr *addr; struct rte_ml_model_info *info; struct rte_ml_io_info *output; struct rte_ml_io_info *input; + struct cn10k_ml_dev *mldev; uint8_t i; uint8_t j; + mldev = dev->data->dev_private; metadata = &model->metadata; info = PLT_PTR_CAST(model->info); input = PLT_PTR_ADD(info, sizeof(struct rte_ml_model_info)); output = PLT_PTR_ADD(input, metadata->model.num_input * 
sizeof(struct rte_ml_io_info)); + addr = &model->addr; /* Set model info */ memset(info, 0, sizeof(struct rte_ml_model_info)); @@ -517,7 +539,9 @@ cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model) metadata->model.version[3]); info->model_id = model->model_id; info->device_id = dev->data->dev_id; - info->batch_size = model->batch_size; + info->io_layout = RTE_ML_IO_LAYOUT_PACKED; + info->min_batches = model->batch_size; + info->max_batches = mldev->fw.req->jd.fw_load.cap.s.max_num_batches / model->batch_size; info->nb_inputs = metadata->model.num_input; info->input_info = input; info->nb_outputs = metadata->model.num_output; @@ -529,24 +553,25 @@ cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model) if (i < MRVL_ML_NUM_INPUT_OUTPUT_1) { rte_memcpy(input[i].name, metadata->input1[i].input_name, MRVL_ML_INPUT_NAME_LEN); - input[i].dtype = metadata->input1[i].input_type; - input[i].qtype = metadata->input1[i].model_input_type; - input[i].shape.format = metadata->input1[i].shape.format; - input[i].shape.w = metadata->input1[i].shape.w; - input[i].shape.x = metadata->input1[i].shape.x; - input[i].shape.y = metadata->input1[i].shape.y; - input[i].shape.z = metadata->input1[i].shape.z; + input[i].nb_dims = addr->input[i].nb_dims; + input[i].shape = addr->input[i].shape; + input[i].type = metadata->input1[i].model_input_type; + input[i].nb_elements = addr->input[i].nb_elements; + input[i].size = + addr->input[i].nb_elements * + rte_ml_io_type_size_get(metadata->input1[i].model_input_type); } else { j = i - MRVL_ML_NUM_INPUT_OUTPUT_1; + rte_memcpy(input[i].name, metadata->input2[j].input_name, MRVL_ML_INPUT_NAME_LEN); - input[i].dtype = metadata->input2[j].input_type; - input[i].qtype = metadata->input2[j].model_input_type; - input[i].shape.format = metadata->input2[j].shape.format; - input[i].shape.w = metadata->input2[j].shape.w; - input[i].shape.x = metadata->input2[j].shape.x; - input[i].shape.y = metadata->input2[j].shape.y; - input[i].shape.z = metadata->input2[j].shape.z; + input[i].nb_dims = addr->input[i].nb_dims; + input[i].shape = addr->input[i].shape; + input[i].type = metadata->input2[j].model_input_type; + input[i].nb_elements = addr->input[i].nb_elements; + input[i].size = + addr->input[i].nb_elements * + rte_ml_io_type_size_get(metadata->input2[j].model_input_type); } } @@ -555,24 +580,25 @@ cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model) if (i < MRVL_ML_NUM_INPUT_OUTPUT_1) { rte_memcpy(output[i].name, metadata->output1[i].output_name, MRVL_ML_OUTPUT_NAME_LEN); - output[i].dtype = metadata->output1[i].output_type; - output[i].qtype = metadata->output1[i].model_output_type; - output[i].shape.format = RTE_ML_IO_FORMAT_1D; - output[i].shape.w = metadata->output1[i].size; - output[i].shape.x = 1; - output[i].shape.y = 1; - output[i].shape.z = 1; + output[i].nb_dims = addr->output[i].nb_dims; + output[i].shape = addr->output[i].shape; + output[i].type = metadata->output1[i].model_output_type; + output[i].nb_elements = addr->output[i].nb_elements; + output[i].size = + addr->output[i].nb_elements * + rte_ml_io_type_size_get(metadata->output1[i].model_output_type); } else { j = i - MRVL_ML_NUM_INPUT_OUTPUT_1; + rte_memcpy(output[i].name, metadata->output2[j].output_name, MRVL_ML_OUTPUT_NAME_LEN); - output[i].dtype = metadata->output2[j].output_type; - output[i].qtype = metadata->output2[j].model_output_type; - output[i].shape.format = RTE_ML_IO_FORMAT_1D; - output[i].shape.w = metadata->output2[j].size; - 
output[i].shape.x = 1; - output[i].shape.y = 1; - output[i].shape.z = 1; + output[i].nb_dims = addr->output[i].nb_dims; + output[i].shape = addr->output[i].shape; + output[i].type = metadata->output2[j].model_output_type; + output[i].nb_elements = addr->output[i].nb_elements; + output[i].size = + addr->output[i].nb_elements * + rte_ml_io_type_size_get(metadata->output2[j].model_output_type); } } } diff --git a/drivers/ml/cnxk/cn10k_ml_model.h b/drivers/ml/cnxk/cn10k_ml_model.h index 1f689363fc4..4cc0744891b 100644 --- a/drivers/ml/cnxk/cn10k_ml_model.h +++ b/drivers/ml/cnxk/cn10k_ml_model.h @@ -409,6 +409,12 @@ struct cn10k_ml_model_addr { /* Input address and size */ struct { + /* Number of dimensions in shape */ + uint32_t nb_dims; + + /* Shape of input */ + uint32_t shape[4]; + /* Number of elements */ uint32_t nb_elements; @@ -421,6 +427,12 @@ struct cn10k_ml_model_addr { /* Output address and size */ struct { + /* Number of dimensions in shape */ + uint32_t nb_dims; + + /* Shape of input */ + uint32_t shape[4]; + /* Number of elements */ uint32_t nb_elements; diff --git a/drivers/ml/cnxk/cn10k_ml_ocm.c b/drivers/ml/cnxk/cn10k_ml_ocm.c index 93505c9c09b..6fb0bb620ed 100644 --- a/drivers/ml/cnxk/cn10k_ml_ocm.c +++ b/drivers/ml/cnxk/cn10k_ml_ocm.c @@ -494,7 +494,7 @@ cn10k_ml_ocm_print(struct rte_ml_dev *dev, FILE *fp) wb_pages = 0 - ocm->tile_ocm_info[tile_id].scratch_pages; for (word_id = 0; word_id < mldev->ocm.mask_words; word_id++) wb_pages += - __builtin_popcount(ocm->tile_ocm_info[tile_id].ocm_mask[word_id]); + rte_popcount32(ocm->tile_ocm_info[tile_id].ocm_mask[word_id]); fprintf(fp, "tile = %2u, scratch_pages = %4u," diff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c index 656467d8918..4abf4ae0d39 100644 --- a/drivers/ml/cnxk/cn10k_ml_ops.c +++ b/drivers/ml/cnxk/cn10k_ml_ops.c @@ -321,8 +321,8 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp) fprintf(fp, "\n"); print_line(fp, LINE_LEN); - fprintf(fp, "%8s %16s %12s %18s %12s %14s\n", "input", "input_name", "input_type", - "model_input_type", "quantize", "format"); + fprintf(fp, "%8s %16s %12s %18s %12s\n", "input", "input_name", "input_type", + "model_input_type", "quantize"); print_line(fp, LINE_LEN); for (i = 0; i < model->metadata.model.num_input; i++) { if (i < MRVL_ML_NUM_INPUT_OUTPUT_1) { @@ -335,12 +335,10 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp) fprintf(fp, "%*s ", 18, str); fprintf(fp, "%*s", 12, (model->metadata.input1[i].quantize == 1 ? "Yes" : "No")); - rte_ml_io_format_to_str(model->metadata.input1[i].shape.format, str, - STR_LEN); - fprintf(fp, "%*s", 16, str); fprintf(fp, "\n"); } else { j = i - MRVL_ML_NUM_INPUT_OUTPUT_1; + fprintf(fp, "%8u ", i); fprintf(fp, "%*s ", 16, model->metadata.input2[j].input_name); rte_ml_io_type_to_str(model->metadata.input2[j].input_type, str, STR_LEN); @@ -350,9 +348,6 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp) fprintf(fp, "%*s ", 18, str); fprintf(fp, "%*s", 12, (model->metadata.input2[j].quantize == 1 ? 
"Yes" : "No")); - rte_ml_io_format_to_str(model->metadata.input2[j].shape.format, str, - STR_LEN); - fprintf(fp, "%*s", 16, str); fprintf(fp, "\n"); } } @@ -476,9 +471,9 @@ cn10k_ml_prep_fp_job_descriptor(struct rte_ml_dev *dev, struct cn10k_ml_req *req req->jd.hdr.sp_flags = 0x0; req->jd.hdr.result = roc_ml_addr_ap2mlip(&mldev->roc, &req->result); req->jd.model_run.input_ddr_addr = - PLT_U64_CAST(roc_ml_addr_ap2mlip(&mldev->roc, op->input.addr)); + PLT_U64_CAST(roc_ml_addr_ap2mlip(&mldev->roc, op->input[0]->addr)); req->jd.model_run.output_ddr_addr = - PLT_U64_CAST(roc_ml_addr_ap2mlip(&mldev->roc, op->output.addr)); + PLT_U64_CAST(roc_ml_addr_ap2mlip(&mldev->roc, op->output[0]->addr)); req->jd.model_run.num_batches = op->nb_batches; } @@ -861,7 +856,11 @@ cn10k_ml_model_xstats_reset(struct rte_ml_dev *dev, int32_t model_id, const uint static int cn10k_ml_cache_model_data(struct rte_ml_dev *dev, uint16_t model_id) { + struct rte_ml_model_info *info; struct cn10k_ml_model *model; + struct rte_ml_buff_seg seg[2]; + struct rte_ml_buff_seg *inp; + struct rte_ml_buff_seg *out; struct rte_ml_op op; char str[RTE_MEMZONE_NAMESIZE]; @@ -869,12 +868,22 @@ cn10k_ml_cache_model_data(struct rte_ml_dev *dev, uint16_t model_id) uint64_t isize = 0; uint64_t osize = 0; int ret = 0; + uint32_t i; model = dev->data->models[model_id]; + info = (struct rte_ml_model_info *)model->info; + inp = &seg[0]; + out = &seg[1]; /* Create input and output buffers. */ - rte_ml_io_input_size_get(dev->data->dev_id, model_id, model->batch_size, &isize, NULL); - rte_ml_io_output_size_get(dev->data->dev_id, model_id, model->batch_size, &osize, NULL); + for (i = 0; i < info->nb_inputs; i++) + isize += info->input_info[i].size; + + for (i = 0; i < info->nb_outputs; i++) + osize += info->output_info[i].size; + + isize = model->batch_size * isize; + osize = model->batch_size * osize; snprintf(str, RTE_MEMZONE_NAMESIZE, "%s_%u", "ml_dummy_io", model_id); mz = plt_memzone_reserve_aligned(str, isize + osize, 0, ML_CN10K_ALIGN_SIZE); @@ -882,17 +891,22 @@ cn10k_ml_cache_model_data(struct rte_ml_dev *dev, uint16_t model_id) return -ENOMEM; memset(mz->addr, 0, isize + osize); + seg[0].addr = mz->addr; + seg[0].iova_addr = mz->iova; + seg[0].length = isize; + seg[0].next = NULL; + + seg[1].addr = PLT_PTR_ADD(mz->addr, isize); + seg[1].iova_addr = mz->iova + isize; + seg[1].length = osize; + seg[1].next = NULL; + op.model_id = model_id; op.nb_batches = model->batch_size; op.mempool = NULL; - op.input.addr = mz->addr; - op.input.length = isize; - op.input.next = NULL; - - op.output.addr = PLT_PTR_ADD(op.input.addr, isize); - op.output.length = osize; - op.output.next = NULL; + op.input = &inp; + op.output = &out; memset(model->req, 0, sizeof(struct cn10k_ml_req)); ret = cn10k_ml_inference_sync(dev, &op); @@ -924,8 +938,9 @@ cn10k_ml_dev_info_get(struct rte_ml_dev *dev, struct rte_ml_dev_info *dev_info) else if (strcmp(mldev->fw.poll_mem, "ddr") == 0) dev_info->max_desc = ML_CN10K_MAX_DESC_PER_QP; + dev_info->max_io = ML_CN10K_MAX_INPUT_OUTPUT; dev_info->max_segments = ML_CN10K_MAX_SEGMENTS; - dev_info->min_align_size = ML_CN10K_ALIGN_SIZE; + dev_info->align_size = ML_CN10K_ALIGN_SIZE; return 0; } @@ -2096,63 +2111,14 @@ cn10k_ml_model_params_update(struct rte_ml_dev *dev, uint16_t model_id, void *bu } static int -cn10k_ml_io_input_size_get(struct rte_ml_dev *dev, uint16_t model_id, uint32_t nb_batches, - uint64_t *input_qsize, uint64_t *input_dsize) -{ - struct cn10k_ml_model *model; - - model = dev->data->models[model_id]; - - if (model 
== NULL) { - plt_err("Invalid model_id = %u", model_id); - return -EINVAL; - } - - if (input_qsize != NULL) - *input_qsize = PLT_U64_CAST(model->addr.total_input_sz_q * - PLT_DIV_CEIL(nb_batches, model->batch_size)); - - if (input_dsize != NULL) - *input_dsize = PLT_U64_CAST(model->addr.total_input_sz_d * - PLT_DIV_CEIL(nb_batches, model->batch_size)); - - return 0; -} - -static int -cn10k_ml_io_output_size_get(struct rte_ml_dev *dev, uint16_t model_id, uint32_t nb_batches, - uint64_t *output_qsize, uint64_t *output_dsize) -{ - struct cn10k_ml_model *model; - - model = dev->data->models[model_id]; - - if (model == NULL) { - plt_err("Invalid model_id = %u", model_id); - return -EINVAL; - } - - if (output_qsize != NULL) - *output_qsize = PLT_U64_CAST(model->addr.total_output_sz_q * - PLT_DIV_CEIL(nb_batches, model->batch_size)); - - if (output_dsize != NULL) - *output_dsize = PLT_U64_CAST(model->addr.total_output_sz_d * - PLT_DIV_CEIL(nb_batches, model->batch_size)); - - return 0; -} - -static int -cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_batches, void *dbuffer, - void *qbuffer) +cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, struct rte_ml_buff_seg **dbuffer, + struct rte_ml_buff_seg **qbuffer) { struct cn10k_ml_model *model; uint8_t model_input_type; uint8_t *lcl_dbuffer; uint8_t *lcl_qbuffer; uint8_t input_type; - uint32_t batch_id; float qscale; uint32_t i; uint32_t j; @@ -2165,11 +2131,9 @@ cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_batc return -EINVAL; } - lcl_dbuffer = dbuffer; - lcl_qbuffer = qbuffer; - batch_id = 0; + lcl_dbuffer = dbuffer[0]->addr; + lcl_qbuffer = qbuffer[0]->addr; -next_batch: for (i = 0; i < model->metadata.model.num_input; i++) { if (i < MRVL_ML_NUM_INPUT_OUTPUT_1) { input_type = model->metadata.input1[i].input_type; @@ -2223,23 +2187,18 @@ cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_batc lcl_qbuffer += model->addr.input[i].sz_q; } - batch_id++; - if (batch_id < PLT_DIV_CEIL(nb_batches, model->batch_size)) - goto next_batch; - return 0; } static int -cn10k_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_batches, - void *qbuffer, void *dbuffer) +cn10k_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id, struct rte_ml_buff_seg **qbuffer, + struct rte_ml_buff_seg **dbuffer) { struct cn10k_ml_model *model; uint8_t model_output_type; uint8_t *lcl_qbuffer; uint8_t *lcl_dbuffer; uint8_t output_type; - uint32_t batch_id; float dscale; uint32_t i; uint32_t j; @@ -2252,11 +2211,9 @@ cn10k_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_ba return -EINVAL; } - lcl_dbuffer = dbuffer; - lcl_qbuffer = qbuffer; - batch_id = 0; + lcl_dbuffer = dbuffer[0]->addr; + lcl_qbuffer = qbuffer[0]->addr; -next_batch: for (i = 0; i < model->metadata.model.num_output; i++) { if (i < MRVL_ML_NUM_INPUT_OUTPUT_1) { output_type = model->metadata.output1[i].output_type; @@ -2311,10 +2268,6 @@ cn10k_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_ba lcl_dbuffer += model->addr.output[i].sz_d; } - batch_id++; - if (batch_id < PLT_DIV_CEIL(nb_batches, model->batch_size)) - goto next_batch; - return 0; } @@ -2635,8 +2588,6 @@ struct rte_ml_dev_ops cn10k_ml_ops = { .model_params_update = cn10k_ml_model_params_update, /* I/O ops */ - .io_input_size_get = cn10k_ml_io_input_size_get, - .io_output_size_get = cn10k_ml_io_output_size_get, .io_quantize = cn10k_ml_io_quantize, .io_dequantize = cn10k_ml_io_dequantize, 
}; diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c index c13a0942aa1..397a32db588 100644 --- a/drivers/net/af_packet/rte_eth_af_packet.c +++ b/drivers/net/af_packet/rte_eth_af_packet.c @@ -313,7 +313,14 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) static int eth_dev_start(struct rte_eth_dev *dev) { + struct pmd_internals *internals = dev->data->dev_private; + uint16_t i; + dev->data->dev_link.link_status = RTE_ETH_LINK_UP; + for (i = 0; i < internals->nb_queues; i++) { + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } return 0; } @@ -341,6 +348,8 @@ eth_dev_stop(struct rte_eth_dev *dev) internals->rx_queue[i].sockfd = -1; internals->tx_queue[i].sockfd = -1; + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; diff --git a/drivers/net/af_xdp/meson.build b/drivers/net/af_xdp/meson.build index 9a8dbb4d49a..9f33e57fa2d 100644 --- a/drivers/net/af_xdp/meson.build +++ b/drivers/net/af_xdp/meson.build @@ -48,6 +48,14 @@ endif if build xsk_check_prefix = ''' +#ifndef typeof +#define typeof __typeof__ +#endif + +#ifndef asm +#define asm __asm__ +#endif + #ifdef RTE_NET_AF_XDP_LIBXDP #include #else @@ -56,17 +64,17 @@ if build ''' if cc.has_function('xsk_socket__create_shared', prefix : xsk_check_prefix, - dependencies : ext_deps) + dependencies : ext_deps, args: cflags) cflags += ['-DRTE_NET_AF_XDP_SHARED_UMEM'] endif if cc.has_function('bpf_object__next_program', prefix : '#include ', - dependencies : bpf_dep) + dependencies : bpf_dep, args: cflags) cflags += ['-DRTE_NET_AF_XDP_LIBBPF_OBJ_OPEN'] endif if cc.has_function('bpf_xdp_attach', prefix : '#include ', - dependencies : bpf_dep) + dependencies : bpf_dep, args: cflags) cflags += ['-DRTE_NET_AF_XDP_LIBBPF_XDP_ATTACH'] endif endif diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c index c7786cc53a5..0cc51223ba3 100644 --- a/drivers/net/af_xdp/rte_eth_af_xdp.c +++ b/drivers/net/af_xdp/rte_eth_af_xdp.c @@ -694,7 +694,13 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) static int eth_dev_start(struct rte_eth_dev *dev) { + uint16_t i; + dev->data->dev_link.link_status = RTE_ETH_LINK_UP; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } return 0; } @@ -703,7 +709,14 @@ eth_dev_start(struct rte_eth_dev *dev) static int eth_dev_stop(struct rte_eth_dev *dev) { + uint16_t i; + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + return 0; } diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c index b2995427c8c..0ffd4b9e9e4 100644 --- a/drivers/net/ark/ark_ethdev.c +++ b/drivers/net/ark/ark_ethdev.c @@ -17,7 +17,6 @@ #include "ark_mpu.h" #include "ark_ddm.h" #include "ark_udm.h" -#include "ark_rqp.h" #include "ark_pktdir.h" #include "ark_pktgen.h" #include "ark_pktchkr.h" @@ -107,36 +106,32 @@ static const struct rte_pci_id pci_id_ark_map[] = { * This structure is used to statically define the capabilities * of supported devices. 
* Capabilities: - * rqpacing - - * Some HW variants require that PCIe read-requests be correctly throttled. - * This is called "rqpacing" and has to do with credit and flow control - * on certain Arkville implementations. + * isvf -- defined for function id that are virtual */ struct ark_caps { - bool rqpacing; bool isvf; }; struct ark_dev_caps { uint32_t device_id; struct ark_caps caps; }; -#define SET_DEV_CAPS(id, rqp, vf) \ - {id, {.rqpacing = rqp, .isvf = vf} } +#define SET_DEV_CAPS(id, vf) \ + {id, {.isvf = vf} } static const struct ark_dev_caps ark_device_caps[] = { - SET_DEV_CAPS(0x100d, true, false), - SET_DEV_CAPS(0x100e, true, false), - SET_DEV_CAPS(0x100f, true, false), - SET_DEV_CAPS(0x1010, false, false), - SET_DEV_CAPS(0x1017, true, false), - SET_DEV_CAPS(0x1018, true, false), - SET_DEV_CAPS(0x1019, true, false), - SET_DEV_CAPS(0x101a, true, false), - SET_DEV_CAPS(0x101b, true, false), - SET_DEV_CAPS(0x101c, true, true), - SET_DEV_CAPS(0x101e, false, false), - SET_DEV_CAPS(0x101f, false, false), + SET_DEV_CAPS(0x100d, false), + SET_DEV_CAPS(0x100e, false), + SET_DEV_CAPS(0x100f, false), + SET_DEV_CAPS(0x1010, false), + SET_DEV_CAPS(0x1017, false), + SET_DEV_CAPS(0x1018, false), + SET_DEV_CAPS(0x1019, false), + SET_DEV_CAPS(0x101a, false), + SET_DEV_CAPS(0x101b, false), + SET_DEV_CAPS(0x101c, true), + SET_DEV_CAPS(0x101e, false), + SET_DEV_CAPS(0x101f, false), {.device_id = 0,} }; @@ -300,7 +295,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev) int ret; int port_count = 1; int p; - bool rqpacing = false; + uint16_t num_queues; ark->eth_dev = dev; @@ -318,7 +313,6 @@ eth_ark_dev_init(struct rte_eth_dev *dev) p = 0; while (ark_device_caps[p].device_id != 0) { if (pci_dev->id.device_id == ark_device_caps[p].device_id) { - rqpacing = ark_device_caps[p].caps.rqpacing; ark->isvf = ark_device_caps[p].caps.isvf; break; } @@ -343,12 +337,6 @@ eth_ark_dev_init(struct rte_eth_dev *dev) ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE]; ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE]; - if (rqpacing) { - ark->rqpacing = - (struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE); - } else { - ark->rqpacing = NULL; - } ark->started = 0; ark->pkt_dir_v = ARK_PKT_DIR_INIT_VAL; @@ -367,17 +355,6 @@ eth_ark_dev_init(struct rte_eth_dev *dev) ark->sysctrl.t32[4], __func__); return -1; } - if (ark->sysctrl.t32[3] != 0) { - if (ark->rqpacing) { - if (ark_rqp_lasped(ark->rqpacing)) { - ARK_PMD_LOG(ERR, "Arkville Evaluation System - " - "Timer has Expired\n"); - return -1; - } - ARK_PMD_LOG(WARNING, "Arkville Evaluation System - " - "Timer is Running\n"); - } - } ARK_PMD_LOG(DEBUG, "HW Sanity test has PASSED, expected constant" @@ -427,6 +404,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev) ark->user_ext.dev_get_port_count(dev, ark->user_data[dev->data->port_id]); ark->num_ports = port_count; + num_queues = ark_api_num_queues_per_port(ark->mpurx.v, port_count); for (p = 0; p < port_count; p++) { struct rte_eth_dev *eth_dev; @@ -452,7 +430,18 @@ eth_ark_dev_init(struct rte_eth_dev *dev) } eth_dev->device = &pci_dev->device; - eth_dev->data->dev_private = ark; + /* Device requires new dev_private data */ + eth_dev->data->dev_private = + rte_zmalloc_socket(name, + sizeof(struct ark_adapter), + RTE_CACHE_LINE_SIZE, + rte_socket_id()); + + memcpy(eth_dev->data->dev_private, ark, + sizeof(struct ark_adapter)); + ark = eth_dev->data->dev_private; + ark->qbase = p * num_queues; + eth_dev->dev_ops = ark->eth_dev->dev_ops; eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst; eth_dev->rx_pkt_burst = 
ark->eth_dev->rx_pkt_burst; @@ -537,9 +526,6 @@ ark_config_device(struct rte_eth_dev *dev) mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); } - if (!ark->isvf && ark->rqpacing) - ark_rqp_stats_reset(ark->rqpacing); - return 0; } @@ -598,17 +584,16 @@ eth_ark_dev_start(struct rte_eth_dev *dev) ark_pktchkr_run(ark->pc); if (!ark->isvf && ark->start_pg && !ark->pg_running) { - pthread_t thread; + rte_thread_t thread; - /* Delay packet generatpr start allow the hardware to be ready + /* Delay packet generator start allow the hardware to be ready * This is only used for sanity checking with internal generator */ - char tname[32]; - snprintf(tname, sizeof(tname), "ark-delay-pg-%d", - dev->data->port_id); + char tname[RTE_THREAD_INTERNAL_NAME_SIZE]; + snprintf(tname, sizeof(tname), "ark-pg%d", dev->data->port_id); - if (rte_ctrl_thread_create(&thread, tname, NULL, - ark_pktgen_delay_start, ark->pg)) { + if (rte_thread_create_internal_control(&thread, tname, + ark_pktgen_delay_start, ark->pg)) { ARK_PMD_LOG(ERR, "Could not create pktgen " "starter thread\n"); return -1; @@ -697,9 +682,6 @@ eth_ark_dev_close(struct rte_eth_dev *dev) /* * This should only be called once for the device during shutdown */ - if (ark->rqpacing) - ark_rqp_dump(ark->rqpacing); - /* return to power-on state */ if (ark->pd) ark_pktdir_setup(ark->pd, ARK_PKT_DIR_INIT_VAL); diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c index cbc0416bc2d..24f1c65690e 100644 --- a/drivers/net/ark/ark_ethdev_rx.c +++ b/drivers/net/ark/ark_ethdev_rx.c @@ -9,6 +9,7 @@ #include "ark_logs.h" #include "ark_mpu.h" #include "ark_udm.h" +#include "ark_ext.h" #define ARK_RX_META_SIZE 32 #define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE) @@ -68,7 +69,7 @@ struct ark_rx_queue { static int eth_ark_rx_hw_setup(struct rte_eth_dev *dev, struct ark_rx_queue *queue, - uint16_t rx_queue_id __rte_unused, uint16_t rx_queue_idx) + uint16_t rx_queue_idx) { rte_iova_t queue_base; rte_iova_t phys_addr_q_base; @@ -124,7 +125,7 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev, uint32_t i; int status; - int qidx = queue_idx; + int qidx = ark->qbase + queue_idx; /* We may already be setup, free memory prior to re-allocation */ if (dev->data->rx_queues[queue_idx] != NULL) { @@ -166,6 +167,13 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev, queue->mb_pool = mb_pool; queue->dataroom = rte_pktmbuf_data_room_size(mb_pool) - RTE_PKTMBUF_HEADROOM; + + /* Check pool's private data to confirm pool structure */ + if (mb_pool->private_data_size != 0) { + struct rte_pmd_ark_lmbuf_mempool_priv *pool_priv = rte_mempool_get_priv(mb_pool); + if (strncmp(pool_priv->cookie, ARK_MEMPOOL_COOKIE, sizeof(pool_priv->cookie)) == 0) + queue->dataroom = pool_priv->dataroom; + } queue->headroom = RTE_PKTMBUF_HEADROOM; queue->phys_qid = qidx; queue->queue_index = queue_idx; @@ -215,7 +223,7 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev, } /* MPU Setup */ if (status == 0) - status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx); + status = eth_ark_rx_hw_setup(dev, queue, queue_idx); if (unlikely(status != 0)) { struct rte_mbuf **mbuf; diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c index 5940a592a2c..4792754f192 100644 --- a/drivers/net/ark/ark_ethdev_tx.c +++ b/drivers/net/ark/ark_ethdev_tx.c @@ -229,7 +229,7 @@ eth_ark_tx_queue_setup(struct rte_eth_dev *dev, struct ark_tx_queue *queue; int status; - int qidx = queue_idx; + int qidx = ark->qbase + queue_idx; if (!rte_is_power_of_2(nb_desc)) { 
ARK_PMD_LOG(ERR, diff --git a/drivers/net/ark/ark_ext.h b/drivers/net/ark/ark_ext.h index d235d0ff858..6d37449195d 100644 --- a/drivers/net/ark/ark_ext.h +++ b/drivers/net/ark/ark_ext.h @@ -5,6 +5,7 @@ #ifndef _ARK_EXT_H_ #define _ARK_EXT_H_ +#include #include /* The following section lists function prototypes for Arkville's @@ -16,6 +17,14 @@ * See documentation for compiling and use of extensions. */ +/* private data optionally attached to mempool for rx */ +struct rte_pmd_ark_lmbuf_mempool_priv { + struct rte_pktmbuf_pool_private pool_private; + char cookie[4]; + uint32_t dataroom; +}; +#define ARK_MEMPOOL_COOKIE "ARK1" + /** * Extension prototype, required implementation if extensions are used. * Called during device probe to initialize the user structure diff --git a/drivers/net/ark/ark_global.h b/drivers/net/ark/ark_global.h index 71d0b53e032..147b14b6c08 100644 --- a/drivers/net/ark/ark_global.h +++ b/drivers/net/ark/ark_global.h @@ -32,7 +32,6 @@ #define ARK_CMAC_BASE 0x80000 #define ARK_PKTDIR_BASE 0xa0000 #define ARK_PKTCHKR_BASE 0x90000 -#define ARK_RCPACING_BASE 0xb0000 #define ARK_EXTERNAL_BASE 0x100000 #define ARK_MPU_QOFFSET 0x00100 #define ARK_MAX_PORTS RTE_MAX_ETHPORTS @@ -112,7 +111,10 @@ struct ark_adapter { ark_pkt_chkr_t pc; ark_pkt_dir_t pd; + /* For single function, multiple ports */ int num_ports; + uint16_t qbase; + bool isvf; /* Packet generator/checker args */ @@ -147,8 +149,6 @@ struct ark_adapter { int started; uint16_t rx_queues; uint16_t tx_queues; - - struct ark_rqpace_t *rqpacing; }; typedef uint32_t *ark_t; diff --git a/drivers/net/ark/ark_pktgen.c b/drivers/net/ark/ark_pktgen.c index cd2d3adc514..69ff7072b2a 100644 --- a/drivers/net/ark/ark_pktgen.c +++ b/drivers/net/ark/ark_pktgen.c @@ -4,10 +4,10 @@ #include #include -#include #include #include +#include #include "ark_pktgen.h" #include "ark_logs.h" @@ -467,7 +467,7 @@ ark_pktgen_setup(ark_pkt_gen_t handle) } } -void * +uint32_t ark_pktgen_delay_start(void *arg) { struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)arg; @@ -476,8 +476,8 @@ ark_pktgen_delay_start(void *arg) * perform a blind sleep here to ensure that the external test * application has time to setup the test before we generate packets */ - pthread_detach(pthread_self()); + rte_thread_detach(rte_thread_self()); usleep(100000); ark_pktgen_run(inst); - return NULL; + return 0; } diff --git a/drivers/net/ark/ark_pktgen.h b/drivers/net/ark/ark_pktgen.h index 7147fe1bd42..925058367c1 100644 --- a/drivers/net/ark/ark_pktgen.h +++ b/drivers/net/ark/ark_pktgen.h @@ -75,6 +75,6 @@ void ark_pktgen_set_hdr_dW(ark_pkt_gen_t handle, uint32_t *hdr); void ark_pktgen_set_start_offset(ark_pkt_gen_t handle, uint32_t x); void ark_pktgen_parse(char *argv); void ark_pktgen_setup(ark_pkt_gen_t handle); -void *ark_pktgen_delay_start(void *arg); +uint32_t ark_pktgen_delay_start(void *arg); #endif diff --git a/drivers/net/ark/ark_rqp.c b/drivers/net/ark/ark_rqp.c deleted file mode 100644 index efb9730fe66..00000000000 --- a/drivers/net/ark/ark_rqp.c +++ /dev/null @@ -1,70 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright (c) 2015-2018 Atomic Rules LLC - */ - -#include - -#include "ark_rqp.h" -#include "ark_logs.h" - -/* ************************************************************************* */ -void -ark_rqp_stats_reset(struct ark_rqpace_t *rqp) -{ - rqp->stats_clear = 1; - /* POR 992 */ - /* rqp->cpld_max = 992; */ - /* POR 64 */ - /* rqp->cplh_max = 64; */ -} - -/* ************************************************************************* 
*/ -void -ark_rqp_dump(struct ark_rqpace_t *rqp) -{ - if (rqp->err_count_other || rqp->cmpl_errors) - ARK_PMD_LOG(ERR, - "RQP Errors noted: ctrl: %d cplh_hmax %d cpld_max %d" - ARK_SU32 - ARK_SU32 - ARK_SU32 "\n", - rqp->ctrl, rqp->cplh_max, rqp->cpld_max, - "Error Count", rqp->err_cnt, - "Error General", rqp->err_count_other, - "Cmpl Errors", rqp->cmpl_errors); - - ARK_PMD_LOG(INFO, "RQP Dump: ctrl: %d cplh_hmax %d cpld_max %d" - ARK_SU32 - ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 - ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 - ARK_SU32 ARK_SU32 ARK_SU32 - ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n", - rqp->ctrl, rqp->cplh_max, rqp->cpld_max, - "Error Count", rqp->err_cnt, - "Error General", rqp->err_count_other, - "stall_pS", rqp->stall_ps, - "stall_pS Min", rqp->stall_ps_min, - "stall_pS Max", rqp->stall_ps_max, - "req_pS", rqp->req_ps, - "req_pS Min", rqp->req_ps_min, - "req_pS Max", rqp->req_ps_max, - "req_dWPS", rqp->req_dw_ps, - "req_dWPS Min", rqp->req_dw_ps_min, - "req_dWPS Max", rqp->req_dw_ps_max, - "cpl_pS", rqp->cpl_ps, - "cpl_pS Min", rqp->cpl_ps_min, - "cpl_pS Max", rqp->cpl_ps_max, - "cpl_dWPS", rqp->cpl_dw_ps, - "cpl_dWPS Min", rqp->cpl_dw_ps_min, - "cpl_dWPS Max", rqp->cpl_dw_ps_max, - "cplh pending", rqp->cplh_pending, - "cpld pending", rqp->cpld_pending, - "cplh pending max", rqp->cplh_pending_max, - "cpld pending max", rqp->cpld_pending_max); -} - -int -ark_rqp_lasped(struct ark_rqpace_t *rqp) -{ - return rqp->lasped; -} diff --git a/drivers/net/ark/ark_rqp.h b/drivers/net/ark/ark_rqp.h deleted file mode 100644 index d09f242e1ef..00000000000 --- a/drivers/net/ark/ark_rqp.h +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright (c) 2015-2018 Atomic Rules LLC - */ - -#ifndef _ARK_RQP_H_ -#define _ARK_RQP_H_ - -#include - -#include - -/* The RQP or ReQuest Pacer is an internal Arkville hardware module - * which limits the PCIE data flow to insure correct operation for the - * particular hardware PCIE endpoint. - * This module is *not* intended for end-user manipulation, hence - * there is minimal documentation. - */ - -/* - * RQ Pacing core hardware structure - * This is an overlay structures to a memory mapped FPGA device. 
These - * structs will never be instantiated in ram memory - */ -struct ark_rqpace_t { - volatile uint32_t ctrl; - volatile uint32_t stats_clear; - volatile uint32_t cplh_max; - volatile uint32_t cpld_max; - volatile uint32_t err_cnt; - volatile uint32_t stall_ps; - volatile uint32_t stall_ps_min; - volatile uint32_t stall_ps_max; - volatile uint32_t req_ps; - volatile uint32_t req_ps_min; - volatile uint32_t req_ps_max; - volatile uint32_t req_dw_ps; - volatile uint32_t req_dw_ps_min; - volatile uint32_t req_dw_ps_max; - volatile uint32_t cpl_ps; - volatile uint32_t cpl_ps_min; - volatile uint32_t cpl_ps_max; - volatile uint32_t cpl_dw_ps; - volatile uint32_t cpl_dw_ps_min; - volatile uint32_t cpl_dw_ps_max; - volatile uint32_t cplh_pending; - volatile uint32_t cpld_pending; - volatile uint32_t cplh_pending_max; - volatile uint32_t cpld_pending_max; - volatile uint32_t err_count_other; - char eval[4]; - volatile int32_t lasped; - volatile uint32_t cmpl_errors; -}; - -void ark_rqp_dump(struct ark_rqpace_t *rqp); -void ark_rqp_stats_reset(struct ark_rqpace_t *rqp); -int ark_rqp_lasped(struct ark_rqpace_t *rqp); -#endif diff --git a/drivers/net/ark/ark_udm.h b/drivers/net/ark/ark_udm.h index f0685c95c78..ec53ec7e79b 100644 --- a/drivers/net/ark/ark_udm.h +++ b/drivers/net/ark/ark_udm.h @@ -20,7 +20,8 @@ */ struct ark_rx_meta { uint32_t user_meta[5]; /* user defined based on fpga code */ - uint8_t reserved[10]; + uint32_t pkt_len32; + uint8_t reserved[6]; uint16_t pkt_len; } __rte_packed; @@ -33,7 +34,7 @@ struct ark_rx_meta { #define ARK_RX_WRITE_TIME_NS 2500 #define ARK_UDM_SETUP 0 #define ARK_UDM_MODID 0x4d445500 -#define ARK_UDM_MODVER 0x37313232 +#define ARK_UDM_MODVER 0x37333332 struct ark_udm_setup_t { union { diff --git a/drivers/net/ark/meson.build b/drivers/net/ark/meson.build index 8d87744c22f..12b3935b854 100644 --- a/drivers/net/ark/meson.build +++ b/drivers/net/ark/meson.build @@ -16,6 +16,5 @@ sources = files( 'ark_pktchkr.c', 'ark_pktdir.c', 'ark_pktgen.c', - 'ark_rqp.c', 'ark_udm.c', ) diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c index b2a08f56354..53d9e38c939 100644 --- a/drivers/net/avp/avp_ethdev.c +++ b/drivers/net/avp/avp_ethdev.c @@ -2036,6 +2036,7 @@ static int avp_dev_start(struct rte_eth_dev *eth_dev) { struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + uint16_t i; int ret; rte_spinlock_lock(&avp->lock); @@ -2056,6 +2057,11 @@ avp_dev_start(struct rte_eth_dev *eth_dev) /* remember current link state */ avp->flags |= AVP_F_LINKUP; + for (i = 0; i < avp->num_rx_queues; i++) + eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < avp->num_tx_queues; i++) + eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + ret = 0; unlock: @@ -2067,6 +2073,7 @@ static int avp_dev_stop(struct rte_eth_dev *eth_dev) { struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + uint16_t i; int ret; rte_spinlock_lock(&avp->lock); @@ -2086,6 +2093,11 @@ avp_dev_stop(struct rte_eth_dev *eth_dev) ret); } + for (i = 0; i < avp->num_rx_queues; i++) + eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < avp->num_tx_queues; i++) + eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + unlock: rte_spinlock_unlock(&avp->lock); return ret; diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c index 48714eebe69..3717166384b 100644 --- a/drivers/net/axgbe/axgbe_ethdev.c +++ b/drivers/net/axgbe/axgbe_ethdev.c @@ -12,6 +12,12 @@ 
#include "eal_filesystem.h" +#ifdef RTE_ARCH_X86 +#include +#else +#define __cpuid(n, a, b, c, d) +#endif + static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev); static int axgbe_dev_configure(struct rte_eth_dev *dev); static int axgbe_dev_start(struct rte_eth_dev *dev); @@ -172,9 +178,14 @@ static const struct axgbe_xstats axgbe_xstats_strings[] = { /* The set of PCI devices this driver supports */ #define AMD_PCI_VENDOR_ID 0x1022 -#define AMD_PCI_RV_ROOT_COMPLEX_ID 0x15d0 -#define AMD_PCI_YC_ROOT_COMPLEX_ID 0x14b5 -#define AMD_PCI_SNOWY_ROOT_COMPLEX_ID 0x1450 + +#define Fam17h 0x17 +#define Fam19h 0x19 + +#define CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 +#define CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 +#define CPUID_VENDOR_AuthenticAMD_edx 0x69746e65 + #define AMD_PCI_AXGBE_DEVICE_V2A 0x1458 #define AMD_PCI_AXGBE_DEVICE_V2B 0x1459 @@ -352,6 +363,7 @@ static int axgbe_dev_start(struct rte_eth_dev *dev) { struct axgbe_port *pdata = dev->data->dev_private; + uint16_t i; int ret; dev->dev_ops = &axgbe_eth_dev_ops; @@ -388,6 +400,12 @@ axgbe_dev_start(struct rte_eth_dev *dev) axgbe_set_rx_function(dev); axgbe_set_tx_function(dev); + + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; } @@ -2111,29 +2129,6 @@ static void axgbe_default_config(struct axgbe_port *pdata) pdata->power_down = 0; } -/* - * Return PCI root complex device id on success else 0 - */ -static uint16_t -get_pci_rc_devid(void) -{ - char pci_sysfs[PATH_MAX]; - const struct rte_pci_addr pci_rc_addr = {0, 0, 0, 0}; - unsigned long device_id; - - snprintf(pci_sysfs, sizeof(pci_sysfs), "%s/" PCI_PRI_FMT "/device", - rte_pci_get_sysfs_path(), pci_rc_addr.domain, - pci_rc_addr.bus, pci_rc_addr.devid, pci_rc_addr.function); - - /* get device id */ - if (eal_parse_sysfs_value(pci_sysfs, &device_id) < 0) { - PMD_INIT_LOG(ERR, "Error in reading PCI sysfs\n"); - return 0; - } - - return (uint16_t)device_id; -} - /* Used in dev_start by primary process and then * in dev_init by secondary process when attaching to an existing ethdev. 
*/ @@ -2186,6 +2181,9 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) uint32_t len; int ret; + unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0; + unsigned char cpu_family = 0, cpu_model = 0; + eth_dev->dev_ops = &axgbe_eth_dev_ops; eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status; @@ -2230,26 +2228,55 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) pdata->vdata = &axgbe_v2b; /* - * Use PCI root complex device ID to identify the CPU + * Use CPUID to get Family and model ID to identify the CPU */ - switch (get_pci_rc_devid()) { - case AMD_PCI_RV_ROOT_COMPLEX_ID: - pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF; - pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT; - break; - case AMD_PCI_YC_ROOT_COMPLEX_ID: - pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF; - pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT; - /* Yellow Carp devices do not need cdr workaround */ - pdata->vdata->an_cdr_workaround = 0; + __cpuid(0x0, eax, ebx, ecx, edx); + + if (ebx == CPUID_VENDOR_AuthenticAMD_ebx && + edx == CPUID_VENDOR_AuthenticAMD_edx && + ecx == CPUID_VENDOR_AuthenticAMD_ecx) { + int unknown_cpu = 0; + eax = 0, ebx = 0, ecx = 0, edx = 0; + + __cpuid(0x1, eax, ebx, ecx, edx); + + cpu_family = ((GET_BITS(eax, 8, 4)) + (GET_BITS(eax, 20, 8))); + cpu_model = ((GET_BITS(eax, 4, 4)) | (((GET_BITS(eax, 16, 4)) << 4) & 0xF0)); + + switch (cpu_family) { + case Fam17h: + /* V1000/R1000 */ + if (cpu_model >= 0x10 && cpu_model <= 0x1F) { + pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT; + /* EPYC 3000 */ + } else if (cpu_model >= 0x01 && cpu_model <= 0x0F) { + pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; + } else { + unknown_cpu = 1; + } break; - case AMD_PCI_SNOWY_ROOT_COMPLEX_ID: - pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; - pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; + case Fam19h: + /* V3000 (Yellow Carp) */ + if (cpu_model >= 0x44 && cpu_model <= 0x47) { + pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT; + + /* Yellow Carp devices do not need cdr workaround */ + pdata->vdata->an_cdr_workaround = 0; + } else { + unknown_cpu = 1; + } break; - default: - PMD_DRV_LOG(ERR, "No supported devices found\n"); - return -ENODEV; + default: + unknown_cpu = 1; + break; + } + if (unknown_cpu) { + PMD_DRV_LOG(ERR, "Unknown CPU family, no supported axgbe device found\n"); + return -ENODEV; + } } /* Configure the PCS indirect addressing support */ diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c index 625a92109ba..a9ff291cef3 100644 --- a/drivers/net/axgbe/axgbe_rxtx.c +++ b/drivers/net/axgbe/axgbe_rxtx.c @@ -1124,6 +1124,7 @@ void axgbe_dev_clear_queues(struct rte_eth_dev *dev) axgbe_rx_queue_release(rxq); dev->data->rx_queues[i] = NULL; } + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } for (i = 0; i < dev->data->nb_tx_queues; i++) { @@ -1133,6 +1134,7 @@ void axgbe_dev_clear_queues(struct rte_eth_dev *dev) axgbe_tx_queue_release(txq); dev->data->tx_queues[i] = NULL; } + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } } diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c index 29c16bb207c..c3283c94f38 100644 --- a/drivers/net/bnx2x/bnx2x.c +++ b/drivers/net/bnx2x/bnx2x.c @@ -5843,17 +5843,15 @@ static int bnx2x_set_power_state(struct bnx2x_softc *sc, uint8_t state) return 0; } - pci_read(sc, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 
&pmcsr, + pci_read(sc, (sc->devinfo.pcie_pm_cap_reg + RTE_PCI_PM_CTRL), &pmcsr, 2); switch (state) { case PCI_PM_D0: - pci_write_word(sc, - (sc->devinfo.pcie_pm_cap_reg + - PCIR_POWER_STATUS), - ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME)); + pci_write_word(sc, (sc->devinfo.pcie_pm_cap_reg + RTE_PCI_PM_CTRL), + ((pmcsr & ~RTE_PCI_PM_CTRL_STATE_MASK) | RTE_PCI_PM_CTRL_PME_STATUS)); - if (pmcsr & PCIM_PSTAT_DMASK) { + if (pmcsr & RTE_PCI_PM_CTRL_STATE_MASK) { /* delay required during transition out of D3hot */ DELAY(20000); } @@ -5866,16 +5864,17 @@ static int bnx2x_set_power_state(struct bnx2x_softc *sc, uint8_t state) return 0; } - pmcsr &= ~PCIM_PSTAT_DMASK; - pmcsr |= PCIM_PSTAT_D3; + pmcsr &= ~RTE_PCI_PM_CTRL_STATE_MASK; + /* D3 power state */ + pmcsr |= 0x3; if (sc->wol) { - pmcsr |= PCIM_PSTAT_PMEENABLE; + pmcsr |= RTE_PCI_PM_CTRL_PME_ENABLE; } pci_write_long(sc, (sc->devinfo.pcie_pm_cap_reg + - PCIR_POWER_STATUS), pmcsr); + RTE_PCI_PM_CTRL), pmcsr); /* * No more memory access after this point until device is brought back @@ -7613,7 +7612,7 @@ static uint32_t bnx2x_pcie_capability_read(struct bnx2x_softc *sc, int reg) struct bnx2x_pci_cap *caps; /* ensure PCIe capability is enabled */ - caps = pci_find_cap(sc, PCIY_EXPRESS, BNX2X_PCI_CAP); + caps = pci_find_cap(sc, RTE_PCI_CAP_ID_EXP, BNX2X_PCI_CAP); if (NULL != caps) { PMD_DRV_LOG(DEBUG, sc, "Found PCIe capability: " "id=0x%04X type=0x%04X addr=0x%08X", @@ -7629,8 +7628,8 @@ static uint32_t bnx2x_pcie_capability_read(struct bnx2x_softc *sc, int reg) static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc) { - return bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) & - PCIM_EXP_STA_TRANSACTION_PND; + return bnx2x_pcie_capability_read(sc, RTE_PCI_EXP_TYPE_RC_EC) & + RTE_PCI_EXP_DEVSTA_TRPND; } /* @@ -7647,7 +7646,7 @@ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc) int reg = 0; /* check if PCI Power Management is enabled */ - caps = pci_find_cap(sc, PCIY_PMG, BNX2X_PCI_CAP); + caps = pci_find_cap(sc, RTE_PCI_CAP_ID_PM, BNX2X_PCI_CAP); if (NULL != caps) { PMD_DRV_LOG(DEBUG, sc, "Found PM capability: " "id=0x%04X type=0x%04X addr=0x%08X", @@ -7657,11 +7656,11 @@ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc) sc->devinfo.pcie_pm_cap_reg = caps->addr; } - link_status = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA); + link_status = bnx2x_pcie_capability_read(sc, RTE_PCI_EXP_LNKSTA); - sc->devinfo.pcie_link_speed = (link_status & PCIM_LINK_STA_SPEED); + sc->devinfo.pcie_link_speed = (link_status & RTE_PCI_EXP_LNKSTA_CLS); sc->devinfo.pcie_link_width = - ((link_status & PCIM_LINK_STA_WIDTH) >> 4); + ((link_status & RTE_PCI_EXP_LNKSTA_NLW) >> 4); PMD_DRV_LOG(DEBUG, sc, "PCIe link speed=%d width=%d", sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); @@ -7669,7 +7668,7 @@ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc) sc->devinfo.pcie_cap_flags |= BNX2X_PCIE_CAPABLE_FLAG; /* check if MSI capability is enabled */ - caps = pci_find_cap(sc, PCIY_MSI, BNX2X_PCI_CAP); + caps = pci_find_cap(sc, RTE_PCI_CAP_ID_MSI, BNX2X_PCI_CAP); if (NULL != caps) { PMD_DRV_LOG(DEBUG, sc, "Found MSI capability at 0x%04x", reg); @@ -7678,7 +7677,7 @@ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc) } /* check if MSI-X capability is enabled */ - caps = pci_find_cap(sc, PCIY_MSIX, BNX2X_PCI_CAP); + caps = pci_find_cap(sc, RTE_PCI_CAP_ID_MSIX, BNX2X_PCI_CAP); if (NULL != caps) { PMD_DRV_LOG(DEBUG, sc, "Found MSI-X capability at 0x%04x", reg); @@ -9586,14 +9585,17 @@ static void bnx2x_init_multi_cos(struct 
bnx2x_softc *sc) } } +static uint8_t bnx2x_pci_capabilities[] = { + RTE_PCI_CAP_ID_EXP, + RTE_PCI_CAP_ID_PM, + RTE_PCI_CAP_ID_MSI, + RTE_PCI_CAP_ID_MSIX, +}; + static int bnx2x_pci_get_caps(struct bnx2x_softc *sc) { - struct { - uint8_t id; - uint8_t next; - } pci_cap; - uint16_t status; struct bnx2x_pci_cap *cap; + unsigned int i; cap = sc->pci_caps = rte_zmalloc("caps", sizeof(struct bnx2x_pci_cap), RTE_CACHE_LINE_SIZE); @@ -9602,29 +9604,21 @@ static int bnx2x_pci_get_caps(struct bnx2x_softc *sc) return -ENOMEM; } -#ifndef RTE_EXEC_ENV_FREEBSD - pci_read(sc, PCI_STATUS, &status, 2); - if (!(status & PCI_STATUS_CAP_LIST)) { -#else - pci_read(sc, PCIR_STATUS, &status, 2); - if (!(status & PCIM_STATUS_CAPPRESENT)) { -#endif + if (!rte_pci_has_capability_list(sc->pci_dev)) { PMD_DRV_LOG(NOTICE, sc, "PCIe capability reading failed"); return -1; } -#ifndef RTE_EXEC_ENV_FREEBSD - pci_read(sc, PCI_CAPABILITY_LIST, &pci_cap.next, 1); -#else - pci_read(sc, PCIR_CAP_PTR, &pci_cap.next, 1); -#endif - while (pci_cap.next) { - cap->addr = pci_cap.next & ~3; - pci_read(sc, pci_cap.next & ~3, &pci_cap, 2); - if (pci_cap.id == 0xff) - break; - cap->id = pci_cap.id; + for (i = 0; i < RTE_DIM(bnx2x_pci_capabilities); i++) { + off_t pos = rte_pci_find_capability(sc->pci_dev, + bnx2x_pci_capabilities[i]); + + if (pos <= 0) + continue; + + cap->id = bnx2x_pci_capabilities[i]; cap->type = BNX2X_PCI_CAP; + cap->addr = pos; cap->next = rte_zmalloc("pci_cap", sizeof(struct bnx2x_pci_cap), RTE_CACHE_LINE_SIZE); @@ -9771,9 +9765,9 @@ int bnx2x_attach(struct bnx2x_softc *sc) if (sc->devinfo.pcie_msix_cap_reg != 0) { uint32_t val; pci_read(sc, - (sc->devinfo.pcie_msix_cap_reg + PCIR_MSIX_CTRL), &val, + (sc->devinfo.pcie_msix_cap_reg + RTE_PCI_MSIX_FLAGS), &val, 2); - sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE) + 1; + sc->igu_sb_cnt = (val & RTE_PCI_MSIX_FLAGS_QSIZE) + 1; } else { sc->igu_sb_cnt = 1; } @@ -9983,10 +9977,10 @@ static void bnx2x_init_pxp(struct bnx2x_softc *sc) uint16_t devctl; int r_order, w_order; - devctl = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL); + devctl = bnx2x_pcie_capability_read(sc, RTE_PCI_EXP_DEVCTL); - w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); - r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); + w_order = ((devctl & RTE_PCI_EXP_DEVCTL_PAYLOAD) >> 5); + r_order = ((devctl & RTE_PCI_EXP_DEVCTL_READRQ) >> 12); ecore_init_pxp_arb(sc, r_order, w_order); } diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 89414ac88a9..35206b4758d 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -30,56 +30,10 @@ #include "elink.h" -#ifndef RTE_EXEC_ENV_FREEBSD -#include - -#define PCIY_PMG PCI_CAP_ID_PM -#define PCIY_MSI PCI_CAP_ID_MSI -#define PCIY_EXPRESS PCI_CAP_ID_EXP -#define PCIY_MSIX PCI_CAP_ID_MSIX -#define PCIR_EXPRESS_DEVICE_STA PCI_EXP_TYPE_RC_EC -#define PCIM_EXP_STA_TRANSACTION_PND PCI_EXP_DEVSTA_TRPND -#define PCIR_EXPRESS_LINK_STA PCI_EXP_LNKSTA -#define PCIM_LINK_STA_WIDTH PCI_EXP_LNKSTA_NLW -#define PCIM_LINK_STA_SPEED PCI_EXP_LNKSTA_CLS -#define PCIR_EXPRESS_DEVICE_CTL PCI_EXP_DEVCTL -#define PCIM_EXP_CTL_MAX_PAYLOAD PCI_EXP_DEVCTL_PAYLOAD -#define PCIM_EXP_CTL_MAX_READ_REQUEST PCI_EXP_DEVCTL_READRQ -#define PCIR_POWER_STATUS PCI_PM_CTRL -#define PCIM_PSTAT_DMASK PCI_PM_CTRL_STATE_MASK -#define PCIM_PSTAT_PME PCI_PM_CTRL_PME_STATUS -#define PCIM_PSTAT_D3 0x3 -#define PCIM_PSTAT_PMEENABLE PCI_PM_CTRL_PME_ENABLE -#define PCIR_MSIX_CTRL PCI_MSIX_FLAGS -#define PCIM_MSIXCTRL_TABLE_SIZE PCI_MSIX_FLAGS_QSIZE 
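Editor's note: the bnx2x hunks above drop the driver's private PCIR_*/PCIM_* compatibility macros and its hand-rolled walk of the PCI capability list in favour of the generic rte_pci_has_capability_list()/rte_pci_find_capability() helpers and the RTE_PCI_* register definitions. The following minimal sketch (not part of the patch) shows that lookup pattern in isolation; the function name pcie_link_status() and the include choices are assumptions, while the rte_pci_* calls and constants are the ones used in the diff.

#include <rte_pci.h>        /* RTE_PCI_CAP_ID_*, RTE_PCI_EXP_LNKSTA definitions */
#include <rte_bus_pci.h>    /* struct rte_pci_device and capability helpers (assumed header) */

/* Return the PCIe link status word of a probed device, or 0 if the device
 * exposes no capability list or no PCI Express capability.
 */
static uint16_t
pcie_link_status(struct rte_pci_device *pci_dev)
{
	uint16_t lnksta = 0;
	off_t pos;

	if (!rte_pci_has_capability_list(pci_dev))
		return 0;

	pos = rte_pci_find_capability(pci_dev, RTE_PCI_CAP_ID_EXP);
	if (pos <= 0)
		return 0;

	/* read the 16-bit link status register inside the PCIe capability */
	if (rte_pci_read_config(pci_dev, &lnksta, sizeof(lnksta),
				pos + RTE_PCI_EXP_LNKSTA) < 0)
		return 0;

	return lnksta;
}

The link speed and width can then be masked out with RTE_PCI_EXP_LNKSTA_CLS and RTE_PCI_EXP_LNKSTA_NLW, exactly as bnx2x_probe_pci_caps() does above.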
-#else -#include -#endif - #define IFM_10G_CX4 20 /* 10GBase CX4 copper */ #define IFM_10G_TWINAX 22 /* 10GBase Twinax copper */ #define IFM_10G_T 26 /* 10GBase-T - RJ45 */ -#ifndef RTE_EXEC_ENV_FREEBSD -#define PCIR_EXPRESS_DEVICE_STA PCI_EXP_TYPE_RC_EC -#define PCIM_EXP_STA_TRANSACTION_PND PCI_EXP_DEVSTA_TRPND -#define PCIR_EXPRESS_LINK_STA PCI_EXP_LNKSTA -#define PCIM_LINK_STA_WIDTH PCI_EXP_LNKSTA_NLW -#define PCIM_LINK_STA_SPEED PCI_EXP_LNKSTA_CLS -#define PCIR_EXPRESS_DEVICE_CTL PCI_EXP_DEVCTL -#define PCIM_EXP_CTL_MAX_PAYLOAD PCI_EXP_DEVCTL_PAYLOAD -#define PCIM_EXP_CTL_MAX_READ_REQUEST PCI_EXP_DEVCTL_READRQ -#else -#define PCIR_EXPRESS_DEVICE_STA PCIER_DEVICE_STA -#define PCIM_EXP_STA_TRANSACTION_PND PCIEM_STA_TRANSACTION_PND -#define PCIR_EXPRESS_LINK_STA PCIER_LINK_STA -#define PCIM_LINK_STA_WIDTH PCIEM_LINK_STA_WIDTH -#define PCIM_LINK_STA_SPEED PCIEM_LINK_STA_SPEED -#define PCIR_EXPRESS_DEVICE_CTL PCIER_DEVICE_CTL -#define PCIM_EXP_CTL_MAX_PAYLOAD PCIEM_CTL_MAX_PAYLOAD -#define PCIM_EXP_CTL_MAX_READ_REQUEST PCIEM_CTL_MAX_READ_REQUEST -#endif - #ifndef ARRAY_SIZE #define ARRAY_SIZE(arr) RTE_DIM(arr) #endif diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c index 4448cf2de2d..1327cbe912a 100644 --- a/drivers/net/bnx2x/bnx2x_ethdev.c +++ b/drivers/net/bnx2x/bnx2x_ethdev.c @@ -211,6 +211,7 @@ bnx2x_dev_start(struct rte_eth_dev *dev) { struct bnx2x_softc *sc = dev->data->dev_private; int ret = 0; + uint16_t i; PMD_INIT_FUNC_TRACE(sc); @@ -244,6 +245,11 @@ bnx2x_dev_start(struct rte_eth_dev *dev) bnx2x_print_device_info(sc); + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return ret; } @@ -252,6 +258,7 @@ bnx2x_dev_stop(struct rte_eth_dev *dev) { struct bnx2x_softc *sc = dev->data->dev_private; int ret = 0; + uint16_t i; PMD_INIT_FUNC_TRACE(sc); @@ -277,6 +284,11 @@ bnx2x_dev_stop(struct rte_eth_dev *dev) return ret; } + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h index ed21ba7f293..0e01b1d4bae 100644 --- a/drivers/net/bnxt/bnxt.h +++ b/drivers/net/bnxt/bnxt.h @@ -8,6 +8,7 @@ #include #include +#include #include #include diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index ee1552452a1..5c4d96d4b14 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -1280,7 +1280,7 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev) if (bp->ieee_1588) goto use_scalar_rx; -#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) +#if defined(RTE_ARCH_X86) if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { PMD_DRV_LOG(INFO, @@ -1332,7 +1332,7 @@ bnxt_transmit_function(struct rte_eth_dev *eth_dev) BNXT_TRUFLOW_EN(bp) || bp->ieee_1588) goto use_scalar_tx; -#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) +#if defined(RTE_ARCH_X86) if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { PMD_DRV_LOG(INFO, @@ -1507,6 +1507,7 @@ static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = 
pci_dev->intr_handle; struct rte_eth_link link; + uint16_t i; int ret; eth_dev->data->dev_started = 0; @@ -1567,6 +1568,11 @@ static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) eth_dev->data->scattered_rx = 0; + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) + eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } @@ -3018,8 +3024,6 @@ static const struct { {bnxt_recv_pkts, "Scalar"}, #if defined(RTE_ARCH_X86) {bnxt_recv_pkts_vec, "Vector SSE"}, -#endif -#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) {bnxt_recv_pkts_vec_avx2, "Vector AVX2"}, #endif #if defined(RTE_ARCH_ARM64) @@ -3052,8 +3056,6 @@ static const struct { {bnxt_xmit_pkts, "Scalar"}, #if defined(RTE_ARCH_X86) {bnxt_xmit_pkts_vec, "Vector SSE"}, -#endif -#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) {bnxt_xmit_pkts_vec_avx2, "Vector AVX2"}, #endif #if defined(RTE_ARCH_ARM64) diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h index 8e722b7bf02..af53bc0c250 100644 --- a/drivers/net/bnxt/bnxt_rxr.h +++ b/drivers/net/bnxt/bnxt_rxr.h @@ -112,7 +112,7 @@ uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq); #endif -#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) +#if defined(RTE_ARCH_X86) uint16_t bnxt_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); #endif diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c index d4e8e8eb871..ea8dbaffba1 100644 --- a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c +++ b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c @@ -261,7 +261,7 @@ recv_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) valid = _mm_cvtsi128_si64(_mm256_extracti128_si256(info3_v, 1)); valid = (valid << CHAR_BIT) | _mm_cvtsi128_si64(_mm256_castsi256_si128(info3_v)); - num_valid = __builtin_popcountll(valid & desc_valid_mask); + num_valid = rte_popcount64(valid & desc_valid_mask); if (num_valid == 0) break; diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c index 2ad8591b900..e99a547f585 100644 --- a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c +++ b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c @@ -259,7 +259,7 @@ recv_burst_vec_sse(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) * the number of valid descriptors. 
*/ valid = _mm_cvtsi128_si64(_mm_packs_epi32(info3_v, info3_v)); - num_valid = __builtin_popcountll(valid & desc_valid_mask); + num_valid = rte_popcount64(valid & desc_valid_mask); if (num_valid == 0) break; diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h index 8e391ee58a0..e64ea2c7d15 100644 --- a/drivers/net/bnxt/bnxt_txr.h +++ b/drivers/net/bnxt/bnxt_txr.h @@ -52,7 +52,7 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); #endif -#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) +#if defined(RTE_ARCH_X86) uint16_t bnxt_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); #endif diff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build index c7a0d5f6c9e..1f9ce51f438 100644 --- a/drivers/net/bnxt/meson.build +++ b/drivers/net/bnxt/meson.build @@ -53,23 +53,15 @@ subdir('hcapi/cfa') if arch_subdir == 'x86' sources += files('bnxt_rxtx_vec_sse.c') - # compile AVX2 version if either: - # a. we have AVX supported in minimum instruction set baseline - # b. it's not minimum instruction set, but supported by compiler - if cc.get_define('__AVX2__', args: machine_args) != '' - cflags += ['-DCC_AVX2_SUPPORT'] - sources += files('bnxt_rxtx_vec_avx2.c') - elif cc.has_argument('-mavx2') - cflags += ['-DCC_AVX2_SUPPORT'] - bnxt_avx2_lib = static_library('bnxt_avx2_lib', - 'bnxt_rxtx_vec_avx2.c', - dependencies: [static_rte_ethdev, - static_rte_bus_pci, - static_rte_kvargs, static_rte_hash], - include_directories: includes, - c_args: [cflags, '-mavx2']) - objs += bnxt_avx2_lib.extract_objects('bnxt_rxtx_vec_avx2.c') - endif + # build AVX2 code with instruction set explicitly enabled for runtime selection + bnxt_avx2_lib = static_library('bnxt_avx2_lib', + 'bnxt_rxtx_vec_avx2.c', + dependencies: [static_rte_ethdev, + static_rte_bus_pci, + static_rte_kvargs, static_rte_hash], + include_directories: includes, + c_args: [cflags, '-mavx2']) + objs += bnxt_avx2_lib.extract_objects('bnxt_rxtx_vec_avx2.c') elif arch_subdir == 'arm' and dpdk_conf.get('RTE_ARCH_64') sources += files('bnxt_rxtx_vec_neon.c') endif diff --git a/drivers/net/bnxt/tf_core/cfa_tcam_mgr_sbmp.h b/drivers/net/bnxt/tf_core/cfa_tcam_mgr_sbmp.h index 6ad158abe8c..78c6c2ab672 100644 --- a/drivers/net/bnxt/tf_core/cfa_tcam_mgr_sbmp.h +++ b/drivers/net/bnxt/tf_core/cfa_tcam_mgr_sbmp.h @@ -39,7 +39,7 @@ struct sbmp { #define SBMP_CLEAR(bm) (SBMP_WORD_GET(bm, 0) = 0) #define SBMP_IS_NULL(bm) (SBMP_WORD_GET(bm, 0) == 0) #define SBMP_COUNT(bm, count) \ - (count = __builtin_popcount(SBMP_WORD_GET(bm, 0))) + (count = rte_popcount32(SBMP_WORD_GET(bm, 0))) #elif SBMP_WORD_MAX == 2 #define SBMP_WENT(session) ((session) / SBMP_WORD_WIDTH) #define SBMP_WBIT(session) (1U << ((session) % SBMP_WORD_WIDTH)) @@ -53,8 +53,8 @@ struct sbmp { #define SBMP_COUNT(bm, count) \ do { \ typeof(bm) *_bm = &(bm); \ - count = __builtin_popcount(SBMP_WORD_GET(*_bm, 0)) + \ - __builtin_popcount(SBMP_WORD_GET(*_bm, 1))); \ + count = rte_popcount32(SBMP_WORD_GET(*_bm, 0)) + \ + rte_popcount32(SBMP_WORD_GET(*_bm, 1))); \ } while (0) #elif SBMP_WORD_MAX == 3 #define SBMP_WENT(session) ((session) / SBMP_WORD_WIDTH) @@ -71,9 +71,9 @@ struct sbmp { #define SBMP_COUNT(bm, count) \ do { \ typeof(bm) *_bm = &(bm); \ - count = __builtin_popcount(SBMP_WORD_GET(*_bm, 0)) + \ - __builtin_popcount(SBMP_WORD_GET(*_bm, 1)) + \ - __builtin_popcount(SBMP_WORD_GET(*_bm, 2)); \ + count = rte_popcount32(SBMP_WORD_GET(*_bm, 
0)) + \ + rte_popcount32(SBMP_WORD_GET(*_bm, 1)) + \ + rte_popcount32(SBMP_WORD_GET(*_bm, 2)); \ } while (0) #else /* SBMP_WORD_MAX > 3 */ #define SBMP_WENT(session) ((session) / SBMP_WORD_WIDTH) @@ -93,7 +93,7 @@ struct sbmp { int _count, _w; \ _count = 0; \ for (_w = 0; _w < SBMP_WORD_MAX; _w++) { \ - _count += __builtin_popcount(SBMP_WORD_GET(*_bm, _w)); \ + _count += rte_popcount32(SBMP_WORD_GET(*_bm, _w)); \ } \ count = _count; \ } while (0) diff --git a/drivers/net/bonding/bonding_testpmd.c b/drivers/net/bonding/bonding_testpmd.c index b3c12cada07..8fcd6cadd0b 100644 --- a/drivers/net/bonding/bonding_testpmd.c +++ b/drivers/net/bonding/bonding_testpmd.c @@ -279,7 +279,7 @@ struct cmd_set_bonding_primary_result { cmdline_fixed_string_t set; cmdline_fixed_string_t bonding; cmdline_fixed_string_t primary; - portid_t slave_id; + portid_t member_id; portid_t port_id; }; @@ -287,13 +287,13 @@ static void cmd_set_bonding_primary_parsed(void *parsed_result, __rte_unused struct cmdline *cl, __rte_unused void *data) { struct cmd_set_bonding_primary_result *res = parsed_result; - portid_t master_port_id = res->port_id; - portid_t slave_port_id = res->slave_id; + portid_t main_port_id = res->port_id; + portid_t member_port_id = res->member_id; - /* Set the primary slave for a bonded device. */ - if (rte_eth_bond_primary_set(master_port_id, slave_port_id) != 0) { - fprintf(stderr, "\t Failed to set primary slave for port = %d.\n", - master_port_id); + /* Set the primary member for a bonding device. */ + if (rte_eth_bond_primary_set(main_port_id, member_port_id) != 0) { + fprintf(stderr, "\t Failed to set primary member for port = %d.\n", + main_port_id); return; } init_port_config(); @@ -308,149 +308,149 @@ static cmdline_parse_token_string_t cmd_setbonding_primary_bonding = static cmdline_parse_token_string_t cmd_setbonding_primary_primary = TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_primary_result, primary, "primary"); -static cmdline_parse_token_num_t cmd_setbonding_primary_slave = +static cmdline_parse_token_num_t cmd_setbonding_primary_member = TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_primary_result, - slave_id, RTE_UINT16); + member_id, RTE_UINT16); static cmdline_parse_token_num_t cmd_setbonding_primary_port = TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_primary_result, port_id, RTE_UINT16); static cmdline_parse_inst_t cmd_set_bonding_primary = { .f = cmd_set_bonding_primary_parsed, - .help_str = "set bonding primary : " - "Set the primary slave for port_id", + .help_str = "set bonding primary : " + "Set the primary member for port_id", .data = NULL, .tokens = { (void *)&cmd_setbonding_primary_set, (void *)&cmd_setbonding_primary_bonding, (void *)&cmd_setbonding_primary_primary, - (void *)&cmd_setbonding_primary_slave, + (void *)&cmd_setbonding_primary_member, (void *)&cmd_setbonding_primary_port, NULL } }; -/* *** ADD SLAVE *** */ -struct cmd_add_bonding_slave_result { +/* *** ADD Member *** */ +struct cmd_add_bonding_member_result { cmdline_fixed_string_t add; cmdline_fixed_string_t bonding; - cmdline_fixed_string_t slave; - portid_t slave_id; + cmdline_fixed_string_t member; + portid_t member_id; portid_t port_id; }; -static void cmd_add_bonding_slave_parsed(void *parsed_result, +static void cmd_add_bonding_member_parsed(void *parsed_result, __rte_unused struct cmdline *cl, __rte_unused void *data) { - struct cmd_add_bonding_slave_result *res = parsed_result; - portid_t master_port_id = res->port_id; - portid_t slave_port_id = res->slave_id; + struct 
cmd_add_bonding_member_result *res = parsed_result; + portid_t main_port_id = res->port_id; + portid_t member_port_id = res->member_id; - /* add the slave for a bonded device. */ - if (rte_eth_bond_slave_add(master_port_id, slave_port_id) != 0) { + /* add the member for a bonding device. */ + if (rte_eth_bond_member_add(main_port_id, member_port_id) != 0) { fprintf(stderr, - "\t Failed to add slave %d to master port = %d.\n", - slave_port_id, master_port_id); + "\t Failed to add member %d to main port = %d.\n", + member_port_id, main_port_id); return; } - ports[master_port_id].update_conf = 1; + ports[main_port_id].update_conf = 1; init_port_config(); - set_port_slave_flag(slave_port_id); + set_port_member_flag(member_port_id); } -static cmdline_parse_token_string_t cmd_addbonding_slave_add = - TOKEN_STRING_INITIALIZER(struct cmd_add_bonding_slave_result, +static cmdline_parse_token_string_t cmd_addbonding_member_add = + TOKEN_STRING_INITIALIZER(struct cmd_add_bonding_member_result, add, "add"); -static cmdline_parse_token_string_t cmd_addbonding_slave_bonding = - TOKEN_STRING_INITIALIZER(struct cmd_add_bonding_slave_result, +static cmdline_parse_token_string_t cmd_addbonding_member_bonding = + TOKEN_STRING_INITIALIZER(struct cmd_add_bonding_member_result, bonding, "bonding"); -static cmdline_parse_token_string_t cmd_addbonding_slave_slave = - TOKEN_STRING_INITIALIZER(struct cmd_add_bonding_slave_result, - slave, "slave"); -static cmdline_parse_token_num_t cmd_addbonding_slave_slaveid = - TOKEN_NUM_INITIALIZER(struct cmd_add_bonding_slave_result, - slave_id, RTE_UINT16); -static cmdline_parse_token_num_t cmd_addbonding_slave_port = - TOKEN_NUM_INITIALIZER(struct cmd_add_bonding_slave_result, +static cmdline_parse_token_string_t cmd_addbonding_member_member = + TOKEN_STRING_INITIALIZER(struct cmd_add_bonding_member_result, + member, "member"); +static cmdline_parse_token_num_t cmd_addbonding_member_memberid = + TOKEN_NUM_INITIALIZER(struct cmd_add_bonding_member_result, + member_id, RTE_UINT16); +static cmdline_parse_token_num_t cmd_addbonding_member_port = + TOKEN_NUM_INITIALIZER(struct cmd_add_bonding_member_result, port_id, RTE_UINT16); -static cmdline_parse_inst_t cmd_add_bonding_slave = { - .f = cmd_add_bonding_slave_parsed, - .help_str = "add bonding slave : " - "Add a slave device to a bonded device", +static cmdline_parse_inst_t cmd_add_bonding_member = { + .f = cmd_add_bonding_member_parsed, + .help_str = "add bonding member : " + "Add a member device to a bonding device", .data = NULL, .tokens = { - (void *)&cmd_addbonding_slave_add, - (void *)&cmd_addbonding_slave_bonding, - (void *)&cmd_addbonding_slave_slave, - (void *)&cmd_addbonding_slave_slaveid, - (void *)&cmd_addbonding_slave_port, + (void *)&cmd_addbonding_member_add, + (void *)&cmd_addbonding_member_bonding, + (void *)&cmd_addbonding_member_member, + (void *)&cmd_addbonding_member_memberid, + (void *)&cmd_addbonding_member_port, NULL } }; -/* *** REMOVE SLAVE *** */ -struct cmd_remove_bonding_slave_result { +/* *** REMOVE Member *** */ +struct cmd_remove_bonding_member_result { cmdline_fixed_string_t remove; cmdline_fixed_string_t bonding; - cmdline_fixed_string_t slave; - portid_t slave_id; + cmdline_fixed_string_t member; + portid_t member_id; portid_t port_id; }; -static void cmd_remove_bonding_slave_parsed(void *parsed_result, +static void cmd_remove_bonding_member_parsed(void *parsed_result, __rte_unused struct cmdline *cl, __rte_unused void *data) { - struct cmd_remove_bonding_slave_result *res = parsed_result; - 
portid_t master_port_id = res->port_id; - portid_t slave_port_id = res->slave_id; + struct cmd_remove_bonding_member_result *res = parsed_result; + portid_t main_port_id = res->port_id; + portid_t member_port_id = res->member_id; - /* remove the slave from a bonded device. */ - if (rte_eth_bond_slave_remove(master_port_id, slave_port_id) != 0) { + /* remove the member from a bonding device. */ + if (rte_eth_bond_member_remove(main_port_id, member_port_id) != 0) { fprintf(stderr, - "\t Failed to remove slave %d from master port = %d.\n", - slave_port_id, master_port_id); + "\t Failed to remove member %d from main port = %d.\n", + member_port_id, main_port_id); return; } init_port_config(); - clear_port_slave_flag(slave_port_id); + clear_port_member_flag(member_port_id); } -static cmdline_parse_token_string_t cmd_removebonding_slave_remove = - TOKEN_STRING_INITIALIZER(struct cmd_remove_bonding_slave_result, +static cmdline_parse_token_string_t cmd_removebonding_member_remove = + TOKEN_STRING_INITIALIZER(struct cmd_remove_bonding_member_result, remove, "remove"); -static cmdline_parse_token_string_t cmd_removebonding_slave_bonding = - TOKEN_STRING_INITIALIZER(struct cmd_remove_bonding_slave_result, +static cmdline_parse_token_string_t cmd_removebonding_member_bonding = + TOKEN_STRING_INITIALIZER(struct cmd_remove_bonding_member_result, bonding, "bonding"); -static cmdline_parse_token_string_t cmd_removebonding_slave_slave = - TOKEN_STRING_INITIALIZER(struct cmd_remove_bonding_slave_result, - slave, "slave"); -static cmdline_parse_token_num_t cmd_removebonding_slave_slaveid = - TOKEN_NUM_INITIALIZER(struct cmd_remove_bonding_slave_result, - slave_id, RTE_UINT16); -static cmdline_parse_token_num_t cmd_removebonding_slave_port = - TOKEN_NUM_INITIALIZER(struct cmd_remove_bonding_slave_result, +static cmdline_parse_token_string_t cmd_removebonding_member_member = + TOKEN_STRING_INITIALIZER(struct cmd_remove_bonding_member_result, + member, "member"); +static cmdline_parse_token_num_t cmd_removebonding_member_memberid = + TOKEN_NUM_INITIALIZER(struct cmd_remove_bonding_member_result, + member_id, RTE_UINT16); +static cmdline_parse_token_num_t cmd_removebonding_member_port = + TOKEN_NUM_INITIALIZER(struct cmd_remove_bonding_member_result, port_id, RTE_UINT16); -static cmdline_parse_inst_t cmd_remove_bonding_slave = { - .f = cmd_remove_bonding_slave_parsed, - .help_str = "remove bonding slave : " - "Remove a slave device from a bonded device", +static cmdline_parse_inst_t cmd_remove_bonding_member = { + .f = cmd_remove_bonding_member_parsed, + .help_str = "remove bonding member : " + "Remove a member device from a bonding device", .data = NULL, .tokens = { - (void *)&cmd_removebonding_slave_remove, - (void *)&cmd_removebonding_slave_bonding, - (void *)&cmd_removebonding_slave_slave, - (void *)&cmd_removebonding_slave_slaveid, - (void *)&cmd_removebonding_slave_port, + (void *)&cmd_removebonding_member_remove, + (void *)&cmd_removebonding_member_bonding, + (void *)&cmd_removebonding_member_member, + (void *)&cmd_removebonding_member_memberid, + (void *)&cmd_removebonding_member_port, NULL } }; -/* *** CREATE BONDED DEVICE *** */ -struct cmd_create_bonded_device_result { +/* *** CREATE BONDING DEVICE *** */ +struct cmd_create_bonding_device_result { cmdline_fixed_string_t create; - cmdline_fixed_string_t bonded; + cmdline_fixed_string_t bonding; cmdline_fixed_string_t device; uint8_t mode; uint8_t socket; @@ -458,10 +458,10 @@ struct cmd_create_bonded_device_result { static int bond_dev_num; -static 
void cmd_create_bonded_device_parsed(void *parsed_result, +static void cmd_create_bonding_device_parsed(void *parsed_result, __rte_unused struct cmdline *cl, __rte_unused void *data) { - struct cmd_create_bonded_device_result *res = parsed_result; + struct cmd_create_bonding_device_result *res = parsed_result; char ethdev_name[RTE_ETH_NAME_MAX_LEN]; int port_id; int ret; @@ -474,13 +474,13 @@ static void cmd_create_bonded_device_parsed(void *parsed_result, snprintf(ethdev_name, RTE_ETH_NAME_MAX_LEN, "net_bonding_testpmd_%d", bond_dev_num++); - /* Create a new bonded device. */ + /* Create a new bonding device. */ port_id = rte_eth_bond_create(ethdev_name, res->mode, res->socket); if (port_id < 0) { - fprintf(stderr, "\t Failed to create bonded device.\n"); + fprintf(stderr, "\t Failed to create bonding device.\n"); return; } - printf("Created new bonded device %s on (port %d).\n", ethdev_name, + printf("Created new bonding device %s on (port %d).\n", ethdev_name, port_id); /* Update number of ports */ @@ -497,38 +497,38 @@ static void cmd_create_bonded_device_parsed(void *parsed_result, ports[port_id].port_status = RTE_PORT_STOPPED; } -static cmdline_parse_token_string_t cmd_createbonded_device_create = - TOKEN_STRING_INITIALIZER(struct cmd_create_bonded_device_result, +static cmdline_parse_token_string_t cmd_createbonding_device_create = + TOKEN_STRING_INITIALIZER(struct cmd_create_bonding_device_result, create, "create"); -static cmdline_parse_token_string_t cmd_createbonded_device_bonded = - TOKEN_STRING_INITIALIZER(struct cmd_create_bonded_device_result, - bonded, "bonded"); -static cmdline_parse_token_string_t cmd_createbonded_device_device = - TOKEN_STRING_INITIALIZER(struct cmd_create_bonded_device_result, +static cmdline_parse_token_string_t cmd_createbonding_device_bonding = + TOKEN_STRING_INITIALIZER(struct cmd_create_bonding_device_result, + bonding, "bonding"); +static cmdline_parse_token_string_t cmd_createbonding_device_device = + TOKEN_STRING_INITIALIZER(struct cmd_create_bonding_device_result, device, "device"); -static cmdline_parse_token_num_t cmd_createbonded_device_mode = - TOKEN_NUM_INITIALIZER(struct cmd_create_bonded_device_result, +static cmdline_parse_token_num_t cmd_createbonding_device_mode = + TOKEN_NUM_INITIALIZER(struct cmd_create_bonding_device_result, mode, RTE_UINT8); -static cmdline_parse_token_num_t cmd_createbonded_device_socket = - TOKEN_NUM_INITIALIZER(struct cmd_create_bonded_device_result, +static cmdline_parse_token_num_t cmd_createbonding_device_socket = + TOKEN_NUM_INITIALIZER(struct cmd_create_bonding_device_result, socket, RTE_UINT8); -static cmdline_parse_inst_t cmd_create_bonded_device = { - .f = cmd_create_bonded_device_parsed, - .help_str = "create bonded device : " - "Create a new bonded device with specific bonding mode and socket", +static cmdline_parse_inst_t cmd_create_bonding_device = { + .f = cmd_create_bonding_device_parsed, + .help_str = "create bonding device : " + "Create a new bonding device with specific bonding mode and socket", .data = NULL, .tokens = { - (void *)&cmd_createbonded_device_create, - (void *)&cmd_createbonded_device_bonded, - (void *)&cmd_createbonded_device_device, - (void *)&cmd_createbonded_device_mode, - (void *)&cmd_createbonded_device_socket, + (void *)&cmd_createbonding_device_create, + (void *)&cmd_createbonding_device_bonding, + (void *)&cmd_createbonding_device_device, + (void *)&cmd_createbonding_device_mode, + (void *)&cmd_createbonding_device_socket, NULL } }; -/* *** SET MAC ADDRESS IN BONDED DEVICE 
*** */ +/* *** SET MAC ADDRESS IN BONDING DEVICE *** */ struct cmd_set_bond_mac_addr_result { cmdline_fixed_string_t set; cmdline_fixed_string_t bonding; @@ -584,7 +584,7 @@ static cmdline_parse_inst_t cmd_set_bond_mac_addr = { } }; -/* *** SET LINK STATUS MONITORING POLLING PERIOD ON BONDED DEVICE *** */ +/* *** SET LINK STATUS MONITORING POLLING PERIOD ON BONDING DEVICE *** */ struct cmd_set_bond_mon_period_result { cmdline_fixed_string_t set; cmdline_fixed_string_t bonding; @@ -697,7 +697,7 @@ static struct testpmd_driver_commands bonding_cmds = { { &cmd_set_bonding_mode, "set bonding mode (value) (port_id)\n" - " Set the bonding mode on a bonded device.\n", + " Set the bonding mode on a bonding device.\n", }, { &cmd_show_bonding_config, @@ -706,33 +706,33 @@ static struct testpmd_driver_commands bonding_cmds = { }, { &cmd_set_bonding_primary, - "set bonding primary (slave_id) (port_id)\n" - " Set the primary slave for a bonded device.\n", + "set bonding primary (member_id) (port_id)\n" + " Set the primary member for a bonding device.\n", }, { - &cmd_add_bonding_slave, - "add bonding slave (slave_id) (port_id)\n" - " Add a slave device to a bonded device.\n", + &cmd_add_bonding_member, + "add bonding member (member_id) (port_id)\n" + " Add a member device to a bonding device.\n", }, { - &cmd_remove_bonding_slave, - "remove bonding slave (slave_id) (port_id)\n" - " Remove a slave device from a bonded device.\n", + &cmd_remove_bonding_member, + "remove bonding member (member_id) (port_id)\n" + " Remove a member device from a bonding device.\n", }, { - &cmd_create_bonded_device, - "create bonded device (mode) (socket)\n" - " Create a new bonded device with specific bonding mode and socket.\n", + &cmd_create_bonding_device, + "create bonding device (mode) (socket)\n" + " Create a new bonding device with specific bonding mode and socket.\n", }, { &cmd_set_bond_mac_addr, "set bonding mac_addr (port_id) (address)\n" - " Set the MAC address of a bonded device.\n", + " Set the MAC address of a bonding device.\n", }, { &cmd_set_balance_xmit_policy, "set bonding balance_xmit_policy (port_id) (l2|l23|l34)\n" - " Set the transmit balance policy for bonded device running in balance mode.\n", + " Set the transmit balance policy for bonding device running in balance mode.\n", }, { &cmd_set_bond_mon_period, diff --git a/drivers/net/bonding/eth_bond_8023ad_private.h b/drivers/net/bonding/eth_bond_8023ad_private.h index a5e1fffea1d..ab7d15f81a3 100644 --- a/drivers/net/bonding/eth_bond_8023ad_private.h +++ b/drivers/net/bonding/eth_bond_8023ad_private.h @@ -15,10 +15,10 @@ #include "rte_eth_bond_8023ad.h" #define BOND_MODE_8023AX_UPDATE_TIMEOUT_MS 100 -/** Maximum number of packets to one slave queued in TX ring. */ -#define BOND_MODE_8023AX_SLAVE_RX_PKTS 3 -/** Maximum number of LACP packets from one slave queued in TX ring. */ -#define BOND_MODE_8023AX_SLAVE_TX_PKTS 1 +/** Maximum number of packets to one member queued in TX ring. */ +#define BOND_MODE_8023AX_MEMBER_RX_PKTS 3 +/** Maximum number of LACP packets from one member queued in TX ring. */ +#define BOND_MODE_8023AX_MEMBER_TX_PKTS 1 /** * Timeouts definitions (5.4.4 in 802.1AX documentation). 
*/ @@ -113,7 +113,7 @@ struct port { enum rte_bond_8023ad_selection selected; /** Indicates if either allmulti or promisc has been enforced on the - * slave so that we can receive lacp packets + * member so that we can receive lacp packets */ #define BOND_8023AD_FORCED_ALLMULTI (1 << 0) #define BOND_8023AD_FORCED_PROMISC (1 << 1) @@ -162,8 +162,8 @@ struct mode8023ad_private { uint8_t external_sm; struct rte_ether_addr mac_addr; - struct rte_eth_link slave_link; - /***< slave link properties */ + struct rte_eth_link member_link; + /***< member link properties */ /** * Configuration of dedicated hardware queues for control plane @@ -194,11 +194,11 @@ struct bond_dev_private; /** * @internal * - * Set mode 4 configuration of bonded interface. + * Set mode 4 configuration of bonding interface. * - * @pre Bonded interface must be stopped. + * @pre Bonding interface must be stopped. * - * @param dev Bonded interface + * @param dev Bonding interface * @param conf new configuration. If NULL set default configuration. */ void @@ -208,9 +208,9 @@ bond_mode_8023ad_setup(struct rte_eth_dev *dev, /** * @internal * - * Enables 802.1AX mode and all active slaves on bonded interface. + * Enables 802.1AX mode and all active members on bonding interface. * - * @param dev Bonded interface + * @param dev Bonding interface * @return * 0 on success, negative value otherwise. */ @@ -220,9 +220,9 @@ bond_mode_8023ad_enable(struct rte_eth_dev *dev); /** * @internal * - * Disables 802.1AX mode of the bonded interface and slaves. + * Disables 802.1AX mode of the bonding interface and members. * - * @param dev Bonded interface + * @param dev Bonding interface * @return * 0 on success, negative value otherwise. */ @@ -232,7 +232,7 @@ int bond_mode_8023ad_disable(struct rte_eth_dev *dev); * @internal * * Starts 802.3AX state machines management logic. - * @param dev Bonded interface + * @param dev Bonding interface * @return * 0 if machines was started, 1 if machines was already running, * negative value otherwise. @@ -244,7 +244,7 @@ bond_mode_8023ad_start(struct rte_eth_dev *dev); * @internal * * Stops 802.3AX state machines management logic. - * @param dev Bonded interface + * @param dev Bonding interface * @return * 0 if this call stopped state machines, -ENOENT if alarm was not set. */ @@ -255,55 +255,55 @@ bond_mode_8023ad_stop(struct rte_eth_dev *dev); * @internal * * Passes given slow packet to state machines management logic. - * @param internals Bonded device private data. - * @param slave_id Slave port id. + * @param internals Bonding device private data. + * @param member_id Member port id. * @param slot_pkt Slow packet. */ void bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals, - uint16_t slave_id, struct rte_mbuf *pkt); + uint16_t member_id, struct rte_mbuf *pkt); /** * @internal * - * Appends given slave used slave + * Appends given member used member * - * @param dev Bonded interface. - * @param port_id Slave port ID to be added + * @param dev Bonding interface. + * @param port_id Member port ID to be added * * @return * 0 on success, negative value otherwise. */ void -bond_mode_8023ad_activate_slave(struct rte_eth_dev *dev, uint16_t port_id); +bond_mode_8023ad_activate_member(struct rte_eth_dev *dev, uint16_t port_id); /** * @internal * - * Denitializes and removes given slave from 802.1AX mode. + * Denitializes and removes given member from 802.1AX mode. * - * @param dev Bonded interface. - * @param slave_num Position of slave in active_slaves array + * @param dev Bonding interface. 
+ * @param member_num Position of member in active_members array * * @return * 0 on success, negative value otherwise. */ int -bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *dev, uint16_t slave_pos); +bond_mode_8023ad_deactivate_member(struct rte_eth_dev *dev, uint16_t member_pos); /** - * Updates state when MAC was changed on bonded device or one of its slaves. - * @param bond_dev Bonded device + * Updates state when MAC was changed on bonding device or one of its members. + * @param bond_dev Bonding device */ void bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev); int bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev, - uint16_t slave_port); + uint16_t member_port); int -bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port); +bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t member_port); int bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id); diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h index d4f1fb27d4b..e6888942107 100644 --- a/drivers/net/bonding/eth_bond_private.h +++ b/drivers/net/bonding/eth_bond_private.h @@ -18,8 +18,8 @@ #include "eth_bond_8023ad_private.h" #include "rte_eth_bond_alb.h" -#define PMD_BOND_SLAVE_PORT_KVARG ("slave") -#define PMD_BOND_PRIMARY_SLAVE_KVARG ("primary") +#define PMD_BOND_MEMBER_PORT_KVARG ("member") +#define PMD_BOND_PRIMARY_MEMBER_KVARG ("primary") #define PMD_BOND_MODE_KVARG ("mode") #define PMD_BOND_AGG_MODE_KVARG ("agg_mode") #define PMD_BOND_XMIT_POLICY_KVARG ("xmit_policy") @@ -50,8 +50,8 @@ extern const struct rte_flow_ops bond_flow_ops; /** Port Queue Mapping Structure */ struct bond_rx_queue { uint16_t queue_id; - /**< Next active_slave to poll */ - uint16_t active_slave; + /**< Next active_member to poll */ + uint16_t active_member; /**< Queue Id */ struct bond_dev_private *dev_private; /**< Reference to eth_dev private structure */ @@ -74,19 +74,19 @@ struct bond_tx_queue { /**< Copy of TX configuration structure for queue */ }; -/** Bonded slave devices structure */ -struct bond_ethdev_slave_ports { - uint16_t slaves[RTE_MAX_ETHPORTS]; /**< Slave port id array */ - uint16_t slave_count; /**< Number of slaves */ +/** Bonding member devices structure */ +struct bond_ethdev_member_ports { + uint16_t members[RTE_MAX_ETHPORTS]; /**< Member port id array */ + uint16_t member_count; /**< Number of members */ }; -struct bond_slave_details { +struct bond_member_details { uint16_t port_id; uint8_t link_status_poll_enabled; uint8_t link_status_wait_to_complete; uint8_t last_link_status; - /**< Port Id of slave eth_dev */ + /**< Port Id of member eth_dev */ struct rte_ether_addr persisted_mac_addr; uint16_t reta_size; @@ -94,7 +94,7 @@ struct bond_slave_details { struct rte_flow { TAILQ_ENTRY(rte_flow) next; - /* Slaves flows */ + /* Members flows */ struct rte_flow *flows[RTE_MAX_ETHPORTS]; /* Flow description for synchronization */ struct rte_flow_conv_rule rule; @@ -102,18 +102,18 @@ struct rte_flow { }; typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts, - uint16_t slave_count, uint16_t *slaves); + uint16_t member_count, uint16_t *members); /** Link Bonding PMD device private configuration Structure */ struct bond_dev_private { - uint16_t port_id; /**< Port Id of Bonded Port */ + uint16_t port_id; /**< Port Id of Bonding Port */ uint8_t mode; /**< Link Bonding Mode */ rte_spinlock_t lock; rte_spinlock_t lsc_lock; - uint16_t primary_port; /**< Primary Slave Port */ - uint16_t current_primary_port; 
/**< Primary Slave Port */ + uint16_t primary_port; /**< Primary Member Port */ + uint16_t current_primary_port; /**< Primary Member Port */ uint16_t user_defined_primary_port; /**< Flag for whether primary port is user defined or not */ @@ -137,16 +137,16 @@ struct bond_dev_private { uint16_t nb_rx_queues; /**< Total number of rx queues */ uint16_t nb_tx_queues; /**< Total number of tx queues*/ - uint16_t active_slave_count; /**< Number of active slaves */ - uint16_t active_slaves[RTE_MAX_ETHPORTS]; /**< Active slave list */ + uint16_t active_member_count; /**< Number of active members */ + uint16_t active_members[RTE_MAX_ETHPORTS]; /**< Active member list */ - uint16_t slave_count; /**< Number of bonded slaves */ - struct bond_slave_details slaves[RTE_MAX_ETHPORTS]; - /**< Array of bonded slaves details */ + uint16_t member_count; /**< Number of bonding members */ + struct bond_member_details members[RTE_MAX_ETHPORTS]; + /**< Array of bonding members details */ struct mode8023ad_private mode4; - uint16_t tlb_slaves_order[RTE_MAX_ETHPORTS]; - /**< TLB active slaves send order */ + uint16_t tlb_members_order[RTE_MAX_ETHPORTS]; + /**< TLB active members send order */ struct mode_alb_private mode6; uint64_t rx_offload_capa; /** Rx offload capability */ @@ -177,7 +177,7 @@ struct bond_dev_private { uint8_t rss_key_len; /**< hash key length in bytes. */ struct rte_kvargs *kvlist; - uint8_t slave_update_idx; + uint8_t member_update_idx; bool kvargs_processing_is_done; @@ -191,19 +191,21 @@ struct bond_dev_private { extern const struct eth_dev_ops default_dev_ops; int -check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev); +check_for_main_bonding_ethdev(const struct rte_eth_dev *eth_dev); int -check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev); +check_for_bonding_ethdev(const struct rte_eth_dev *eth_dev); -/* Search given slave array to find position of given id. - * Return slave pos or slaves_count if not found. */ +/* + * Search given member array to find position of given id. + * Return member pos or members_count if not found. 
+ */ static inline uint16_t -find_slave_by_id(uint16_t *slaves, uint16_t slaves_count, uint16_t slave_id) { +find_member_by_id(uint16_t *members, uint16_t members_count, uint16_t member_id) { uint16_t pos; - for (pos = 0; pos < slaves_count; pos++) { - if (slave_id == slaves[pos]) + for (pos = 0; pos < members_count; pos++) { + if (member_id == members[pos]) break; } @@ -214,16 +216,16 @@ int valid_port_id(uint16_t port_id); int -valid_bonded_port_id(uint16_t port_id); +valid_bonding_port_id(uint16_t port_id); int -valid_slave_port_id(struct bond_dev_private *internals, uint16_t port_id); +valid_member_port_id(struct bond_dev_private *internals, uint16_t port_id); void -deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id); +deactivate_member(struct rte_eth_dev *eth_dev, uint16_t port_id); void -activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id); +activate_member(struct rte_eth_dev *eth_dev, uint16_t port_id); int mac_address_set(struct rte_eth_dev *eth_dev, @@ -234,66 +236,66 @@ mac_address_get(struct rte_eth_dev *eth_dev, struct rte_ether_addr *dst_mac_addr); int -mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev); +mac_address_members_update(struct rte_eth_dev *bonding_eth_dev); int -slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev, - uint16_t slave_port_id); +member_add_mac_addresses(struct rte_eth_dev *bonding_eth_dev, + uint16_t member_port_id); int -slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev, - uint16_t slave_port_id); +member_remove_mac_addresses(struct rte_eth_dev *bonding_eth_dev, + uint16_t member_port_id); int bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, uint8_t mode); int -slave_configure(struct rte_eth_dev *bonded_eth_dev, - struct rte_eth_dev *slave_eth_dev); +member_configure(struct rte_eth_dev *bonding_eth_dev, + struct rte_eth_dev *member_eth_dev); int -slave_start(struct rte_eth_dev *bonded_eth_dev, - struct rte_eth_dev *slave_eth_dev); +member_start(struct rte_eth_dev *bonding_eth_dev, + struct rte_eth_dev *member_eth_dev); void -slave_remove(struct bond_dev_private *internals, - struct rte_eth_dev *slave_eth_dev); +member_remove(struct bond_dev_private *internals, + struct rte_eth_dev *member_eth_dev); void -slave_add(struct bond_dev_private *internals, - struct rte_eth_dev *slave_eth_dev); +member_add(struct bond_dev_private *internals, + struct rte_eth_dev *member_eth_dev); void burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts, - uint16_t slave_count, uint16_t *slaves); + uint16_t member_count, uint16_t *members); void burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts, - uint16_t slave_count, uint16_t *slaves); + uint16_t member_count, uint16_t *members); void burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts, - uint16_t slave_count, uint16_t *slaves); + uint16_t member_count, uint16_t *members); void bond_ethdev_primary_set(struct bond_dev_private *internals, - uint16_t slave_port_id); + uint16_t member_port_id); int bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, void *param, void *ret_param); int -bond_ethdev_parse_slave_port_kvarg(const char *key, +bond_ethdev_parse_member_port_kvarg(const char *key, const char *value, void *extra_args); int -bond_ethdev_parse_slave_mode_kvarg(const char *key, +bond_ethdev_parse_member_mode_kvarg(const char *key, const char *value, void *extra_args); int -bond_ethdev_parse_slave_agg_mode_kvarg(const char *key __rte_unused, +bond_ethdev_parse_member_agg_mode_kvarg(const char *key __rte_unused, const char 
*value, void *extra_args); int @@ -301,7 +303,7 @@ bond_ethdev_parse_socket_id_kvarg(const char *key, const char *value, void *extra_args); int -bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key, +bond_ethdev_parse_primary_member_port_id_kvarg(const char *key, const char *value, void *extra_args); int @@ -323,7 +325,7 @@ void bond_tlb_enable(struct bond_dev_private *internals); void -bond_tlb_activate_slave(struct bond_dev_private *internals); +bond_tlb_activate_member(struct bond_dev_private *internals); int bond_ethdev_stop(struct rte_eth_dev *eth_dev); diff --git a/drivers/net/bonding/rte_eth_bond.h b/drivers/net/bonding/rte_eth_bond.h index 874aa91a5fe..f10165f2c6f 100644 --- a/drivers/net/bonding/rte_eth_bond.h +++ b/drivers/net/bonding/rte_eth_bond.h @@ -10,7 +10,7 @@ * * RTE Link Bonding Ethernet Device * Link Bonding for 1GbE and 10GbE ports to allow the aggregation of multiple - * (slave) NICs into a single logical interface. The bonded device processes + * (member) NICs into a single logical interface. The bonding device processes * these interfaces based on the mode of operation specified and supported. * This implementation supports 4 modes of operation round robin, active backup * balance and broadcast. Providing redundant links, fault tolerance and/or @@ -28,24 +28,28 @@ extern "C" { #define BONDING_MODE_ROUND_ROBIN (0) /**< Round Robin (Mode 0). * In this mode all transmitted packets will be balanced equally across all - * active slaves of the bonded in a round robin fashion. */ + * active members of the bonding in a round robin fashion. + */ #define BONDING_MODE_ACTIVE_BACKUP (1) /**< Active Backup (Mode 1). * In this mode all packets transmitted will be transmitted on the primary - * slave until such point as the primary slave is no longer available and then - * transmitted packets will be sent on the next available slaves. The primary - * slave can be defined by the user but defaults to the first active slave - * available if not specified. */ + * member until such point as the primary member is no longer available and then + * transmitted packets will be sent on the next available members. The primary + * member can be defined by the user but defaults to the first active member + * available if not specified. + */ #define BONDING_MODE_BALANCE (2) /**< Balance (Mode 2). * In this mode all packets transmitted will be balanced across the available - * slaves using one of three available transmit policies - l2, l2+3 or l3+4. + * members using one of three available transmit policies - l2, l2+3 or l3+4. * See BALANCE_XMIT_POLICY macros definitions for further details on transmit - * policies. */ + * policies. + */ #define BONDING_MODE_BROADCAST (3) /**< Broadcast (Mode 3). * In this mode all transmitted packets will be transmitted on all available - * active slaves of the bonded. */ + * active members of the bonding. + */ #define BONDING_MODE_8023AD (4) /**< 802.3AD (Mode 4). * @@ -62,22 +66,22 @@ extern "C" { * be handled with the expected latency and this may cause the link status to be * incorrectly marked as down or failure to correctly negotiate with peers. * - For optimal performance during initial handshaking the array of mbufs provided - * to rx_burst should be at least 2 times the slave count size. - * + * to rx_burst should be at least 2 times the member count size. */ #define BONDING_MODE_TLB (5) /**< Adaptive TLB (Mode 5) * This mode provides an adaptive transmit load balancing. 
It dynamically - * changes the transmitting slave, according to the computed load. Statistics - * are collected in 100ms intervals and scheduled every 10ms */ + * changes the transmitting member, according to the computed load. Statistics + * are collected in 100ms intervals and scheduled every 10ms. + */ #define BONDING_MODE_ALB (6) /**< Adaptive Load Balancing (Mode 6) * This mode includes adaptive TLB and receive load balancing (RLB). In RLB the * bonding driver intercepts ARP replies send by local system and overwrites its * source MAC address, so that different peers send data to the server on - * different slave interfaces. When local system sends ARP request, it saves IP + * different member interfaces. When local system sends ARP request, it saves IP * information from it. When ARP reply from that peer is received, its MAC is - * stored, one of slave MACs assigned and ARP reply send to that peer. + * stored, one of member MACs assigned and ARP reply send to that peer. */ /* Balance Mode Transmit Policies */ @@ -89,7 +93,7 @@ extern "C" { /**< Layer 3+4 (IP Addresses + UDP Ports) transmit load balancing */ /** - * Create a bonded rte_eth_dev device + * Create a bonding rte_eth_dev device * * @param name Name of new link bonding device. * @param mode Mode to initialize bonding device in. @@ -102,7 +106,7 @@ int rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id); /** - * Free a bonded rte_eth_dev device + * Free a bonding rte_eth_dev device * * @param name Name of the link bonding device. * @@ -113,162 +117,166 @@ int rte_eth_bond_free(const char *name); /** - * Add a rte_eth_dev device as a slave to the bonded device + * Add a rte_eth_dev device as a member to the bonding device * - * @param bonded_port_id Port ID of bonded device. - * @param slave_port_id Port ID of slave device. + * @param bonding_port_id Port ID of bonding device. + * @param member_port_id Port ID of member device. * * @return * 0 on success, negative value otherwise */ +__rte_experimental int -rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id); +rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t member_port_id); /** - * Remove a slave rte_eth_dev device from the bonded device + * Remove a member rte_eth_dev device from the bonding device * - * @param bonded_port_id Port ID of bonded device. - * @param slave_port_id Port ID of slave device. + * @param bonding_port_id Port ID of bonding device. + * @param member_port_id Port ID of member device. * * @return * 0 on success, negative value otherwise */ +__rte_experimental int -rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id); +rte_eth_bond_member_remove(uint16_t bonding_port_id, uint16_t member_port_id); /** - * Set link bonding mode of bonded device + * Set link bonding mode of bonding device * - * @param bonded_port_id Port ID of bonded device. + * @param bonding_port_id Port ID of bonding device. * @param mode Bonding mode to set * * @return * 0 on success, negative value otherwise */ int -rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode); +rte_eth_bond_mode_set(uint16_t bonding_port_id, uint8_t mode); /** - * Get link bonding mode of bonded device + * Get link bonding mode of bonding device * - * @param bonded_port_id Port ID of bonded device. + * @param bonding_port_id Port ID of bonding device. 
* * @return * link bonding mode on success, negative value otherwise */ int -rte_eth_bond_mode_get(uint16_t bonded_port_id); +rte_eth_bond_mode_get(uint16_t bonding_port_id); /** - * Set slave rte_eth_dev as primary slave of bonded device + * Set member rte_eth_dev as primary member of bonding device * - * @param bonded_port_id Port ID of bonded device. - * @param slave_port_id Port ID of slave device. + * @param bonding_port_id Port ID of bonding device. + * @param member_port_id Port ID of member device. * * @return * 0 on success, negative value otherwise */ int -rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id); +rte_eth_bond_primary_set(uint16_t bonding_port_id, uint16_t member_port_id); /** - * Get primary slave of bonded device + * Get primary member of bonding device * - * @param bonded_port_id Port ID of bonded device. + * @param bonding_port_id Port ID of bonding device. * * @return - * Port Id of primary slave on success, -1 on failure + * Port Id of primary member on success, -1 on failure */ int -rte_eth_bond_primary_get(uint16_t bonded_port_id); +rte_eth_bond_primary_get(uint16_t bonding_port_id); /** - * Populate an array with list of the slaves port id's of the bonded device + * Populate an array with list of the members port id's of the bonding device * - * @param bonded_port_id Port ID of bonded eth_dev to interrogate - * @param slaves Array to be populated with the current active slaves - * @param len Length of slaves array + * @param bonding_port_id Port ID of bonding eth_dev to interrogate + * @param members Array to be populated with the current active members + * @param len Length of members array * * @return - * Number of slaves associated with bonded device on success, + * Number of members associated with bonding device on success, * negative value otherwise */ +__rte_experimental int -rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[], - uint16_t len); +rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t members[], + uint16_t len); /** - * Populate an array with list of the active slaves port id's of the bonded + * Populate an array with list of the active members port id's of the bonding * device. * - * @param bonded_port_id Port ID of bonded eth_dev to interrogate - * @param slaves Array to be populated with the current active slaves - * @param len Length of slaves array + * @param bonding_port_id Port ID of bonding eth_dev to interrogate + * @param members Array to be populated with the current active members + * @param len Length of members array * * @return - * Number of active slaves associated with bonded device on success, + * Number of active members associated with bonding device on success, * negative value otherwise */ +__rte_experimental int -rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[], - uint16_t len); +rte_eth_bond_active_members_get(uint16_t bonding_port_id, uint16_t members[], + uint16_t len); /** - * Set explicit MAC address to use on bonded device and it's slaves. + * Set explicit MAC address to use on bonding device and it's members. * - * @param bonded_port_id Port ID of bonded device. - * @param mac_addr MAC Address to use on bonded device overriding - * slaves MAC addresses + * @param bonding_port_id Port ID of bonding device. 
+ * @param mac_addr MAC Address to use on bonding device overriding + * members MAC addresses * * @return * 0 on success, negative value otherwise */ int -rte_eth_bond_mac_address_set(uint16_t bonded_port_id, +rte_eth_bond_mac_address_set(uint16_t bonding_port_id, struct rte_ether_addr *mac_addr); /** - * Reset bonded device to use MAC from primary slave on bonded device and it's - * slaves. + * Reset bonding device to use MAC from primary member on bonding device and it's + * members. * - * @param bonded_port_id Port ID of bonded device. + * @param bonding_port_id Port ID of bonding device. * * @return * 0 on success, negative value otherwise */ int -rte_eth_bond_mac_address_reset(uint16_t bonded_port_id); +rte_eth_bond_mac_address_reset(uint16_t bonding_port_id); /** - * Set the transmit policy for bonded device to use when it is operating in + * Set the transmit policy for bonding device to use when it is operating in * balance mode, this parameter is otherwise ignored in other modes of * operation. * - * @param bonded_port_id Port ID of bonded device. + * @param bonding_port_id Port ID of bonding device. * @param policy Balance mode transmission policy. * * @return * 0 on success, negative value otherwise. */ int -rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy); +rte_eth_bond_xmit_policy_set(uint16_t bonding_port_id, uint8_t policy); /** - * Get the transmit policy set on bonded device for balance mode operation + * Get the transmit policy set on bonding device for balance mode operation * - * @param bonded_port_id Port ID of bonded device. + * @param bonding_port_id Port ID of bonding device. * * @return * Balance transmit policy on success, negative value otherwise. */ int -rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id); +rte_eth_bond_xmit_policy_get(uint16_t bonding_port_id); /** * Set the link monitoring frequency (in ms) for monitoring the link status of - * slave devices + * member devices * - * @param bonded_port_id Port ID of bonded device. + * @param bonding_port_id Port ID of bonding device. * @param internal_ms Monitoring interval in milliseconds * * @return @@ -276,72 +284,72 @@ rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id); */ int -rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms); +rte_eth_bond_link_monitoring_set(uint16_t bonding_port_id, uint32_t internal_ms); /** * Get the current link monitoring frequency (in ms) for monitoring of the link - * status of slave devices + * status of member devices * - * @param bonded_port_id Port ID of bonded device. + * @param bonding_port_id Port ID of bonding device. * * @return * Monitoring interval on success, negative value otherwise. */ int -rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id); +rte_eth_bond_link_monitoring_get(uint16_t bonding_port_id); /** - * Set the period in milliseconds for delaying the disabling of a bonded link + * Set the period in milliseconds for delaying the disabling of a bonding link * when the link down status has been detected * - * @param bonded_port_id Port ID of bonded device. + * @param bonding_port_id Port ID of bonding device. * @param delay_ms Delay period in milliseconds. * * @return * 0 on success, negative value otherwise. 
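Reviewer's note: the renamed public API keeps the old semantics, only the identifiers change, and the member-list calls gain __rte_experimental (so callers need ALLOW_EXPERIMENTAL_API). A minimal usage sketch under those assumptions, with hypothetical port ids and no error handling beyond the checks shown, not a canonical sequence:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_bond.h>

/* Illustration only: member0/member1 are assumed to be already-probed ports. */
static int
bonding_setup_example(uint16_t member0, uint16_t member1)
{
	uint16_t active[RTE_MAX_ETHPORTS];
	int bonding_port;
	int n;

	/* Create the bonding device in active-backup mode on socket 0. */
	bonding_port = rte_eth_bond_create("net_bonding0",
			BONDING_MODE_ACTIVE_BACKUP, 0);
	if (bonding_port < 0)
		return bonding_port;

	if (rte_eth_bond_member_add(bonding_port, member0) != 0 ||
	    rte_eth_bond_member_add(bonding_port, member1) != 0)
		return -1;

	/* Mode 1: traffic stays on the primary member until it fails. */
	if (rte_eth_bond_primary_set(bonding_port, member0) != 0)
		return -1;

	/* Query which members are currently active. */
	n = rte_eth_bond_active_members_get(bonding_port, active,
			RTE_DIM(active));
	return n < 0 ? n : bonding_port;
}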
*/ int -rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id, +rte_eth_bond_link_down_prop_delay_set(uint16_t bonding_port_id, uint32_t delay_ms); /** - * Get the period in milliseconds set for delaying the disabling of a bonded + * Get the period in milliseconds set for delaying the disabling of a bonding * link when the link down status has been detected * - * @param bonded_port_id Port ID of bonded device. + * @param bonding_port_id Port ID of bonding device. * * @return * Delay period on success, negative value otherwise. */ int -rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id); +rte_eth_bond_link_down_prop_delay_get(uint16_t bonding_port_id); /** - * Set the period in milliseconds for delaying the enabling of a bonded link + * Set the period in milliseconds for delaying the enabling of a bonding link * when the link up status has been detected * - * @param bonded_port_id Port ID of bonded device. + * @param bonding_port_id Port ID of bonding device. * @param delay_ms Delay period in milliseconds. * * @return * 0 on success, negative value otherwise. */ int -rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, +rte_eth_bond_link_up_prop_delay_set(uint16_t bonding_port_id, uint32_t delay_ms); /** - * Get the period in milliseconds set for delaying the enabling of a bonded + * Get the period in milliseconds set for delaying the enabling of a bonding * link when the link up status has been detected * - * @param bonded_port_id Port ID of bonded device. + * @param bonding_port_id Port ID of bonding device. * * @return * Delay period on success, negative value otherwise. */ int -rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id); +rte_eth_bond_link_up_prop_delay_get(uint16_t bonding_port_id); #ifdef __cplusplus diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c index 4a266bb2caf..677067870f8 100644 --- a/drivers/net/bonding/rte_eth_bond_8023ad.c +++ b/drivers/net/bonding/rte_eth_bond_8023ad.c @@ -19,7 +19,7 @@ static void bond_mode_8023ad_ext_periodic_cb(void *arg); #define MODE4_DEBUG(fmt, ...) 
\ rte_log(RTE_LOG_DEBUG, bond_logtype, \ "%6u [Port %u: %s] " fmt, \ - bond_dbg_get_time_diff_ms(), slave_id, \ + bond_dbg_get_time_diff_ms(), member_id, \ __func__, ##__VA_ARGS__) static uint64_t start_time; @@ -184,9 +184,9 @@ set_warning_flags(struct port *port, uint16_t flags) } static void -show_warnings(uint16_t slave_id) +show_warnings(uint16_t member_id) { - struct port *port = &bond_mode_8023ad_ports[slave_id]; + struct port *port = &bond_mode_8023ad_ports[member_id]; uint8_t warnings; do { @@ -205,36 +205,36 @@ show_warnings(uint16_t slave_id) if (warnings & WRN_RX_QUEUE_FULL) { RTE_BOND_LOG(DEBUG, - "Slave %u: failed to enqueue LACP packet into RX ring.\n" - "Receive and transmit functions must be invoked on bonded" + "Member %u: failed to enqueue LACP packet into RX ring.\n" + "Receive and transmit functions must be invoked on bonding" "interface at least 10 times per second or LACP will notwork correctly", - slave_id); + member_id); } if (warnings & WRN_TX_QUEUE_FULL) { RTE_BOND_LOG(DEBUG, - "Slave %u: failed to enqueue LACP packet into TX ring.\n" - "Receive and transmit functions must be invoked on bonded" + "Member %u: failed to enqueue LACP packet into TX ring.\n" + "Receive and transmit functions must be invoked on bonding" "interface at least 10 times per second or LACP will not work correctly", - slave_id); + member_id); } if (warnings & WRN_RX_MARKER_TO_FAST) - RTE_BOND_LOG(INFO, "Slave %u: marker to early - ignoring.", - slave_id); + RTE_BOND_LOG(INFO, "Member %u: marker to early - ignoring.", + member_id); if (warnings & WRN_UNKNOWN_SLOW_TYPE) { RTE_BOND_LOG(INFO, - "Slave %u: ignoring unknown slow protocol frame type", - slave_id); + "Member %u: ignoring unknown slow protocol frame type", + member_id); } if (warnings & WRN_UNKNOWN_MARKER_TYPE) - RTE_BOND_LOG(INFO, "Slave %u: ignoring unknown marker type", - slave_id); + RTE_BOND_LOG(INFO, "Member %u: ignoring unknown marker type", + member_id); if (warnings & WRN_NOT_LACP_CAPABLE) - MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id); + MODE4_DEBUG("Port %u is not LACP capable!\n", member_id); } static void @@ -256,10 +256,10 @@ record_default(struct port *port) * @param port Port on which LACPDU was received. */ static void -rx_machine(struct bond_dev_private *internals, uint16_t slave_id, +rx_machine(struct bond_dev_private *internals, uint16_t member_id, struct lacpdu *lacp) { - struct port *agg, *port = &bond_mode_8023ad_ports[slave_id]; + struct port *agg, *port = &bond_mode_8023ad_ports[member_id]; uint64_t timeout; if (SM_FLAG(port, BEGIN)) { @@ -389,9 +389,9 @@ rx_machine(struct bond_dev_private *internals, uint16_t slave_id, * @param port Port to handle state machine. */ static void -periodic_machine(struct bond_dev_private *internals, uint16_t slave_id) +periodic_machine(struct bond_dev_private *internals, uint16_t member_id) { - struct port *port = &bond_mode_8023ad_ports[slave_id]; + struct port *port = &bond_mode_8023ad_ports[member_id]; /* Calculate if either site is LACP enabled */ uint64_t timeout; uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) || @@ -451,9 +451,9 @@ periodic_machine(struct bond_dev_private *internals, uint16_t slave_id) * @param port Port to handle state machine. 
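As the warning strings above note, in mode 4 the bonding port must be polled frequently (at least ~10 times per second), otherwise the LACP rx/tx rings fill up and negotiation degrades. A hedged sketch of one such polling step, with an illustrative queue id and burst size:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define EXAMPLE_BURST 32	/* illustrative burst size */

/* One polling iteration on the bonding port; the application is expected to
 * run this (or an equivalent rx/tx burst pair) at least ~10 times per second
 * so the LACP rings are drained. */
static void
bonding_poll_once(uint16_t bonding_port)
{
	struct rte_mbuf *pkts[EXAMPLE_BURST];
	uint16_t nb_rx, nb_tx;

	nb_rx = rte_eth_rx_burst(bonding_port, 0, pkts, EXAMPLE_BURST);
	/* ... application processing of pkts[0..nb_rx) ... */
	nb_tx = rte_eth_tx_burst(bonding_port, 0, pkts, nb_rx);

	/* Free anything the PMD did not accept. */
	while (nb_tx < nb_rx)
		rte_pktmbuf_free(pkts[nb_tx++]);
}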
*/ static void -mux_machine(struct bond_dev_private *internals, uint16_t slave_id) +mux_machine(struct bond_dev_private *internals, uint16_t member_id) { - struct port *port = &bond_mode_8023ad_ports[slave_id]; + struct port *port = &bond_mode_8023ad_ports[member_id]; /* Save current state for later use */ const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING | @@ -527,8 +527,8 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id) SM_FLAG_SET(port, NTT); MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n"); RTE_BOND_LOG(INFO, - "Bond %u: slave id %u distributing started.", - internals->port_id, slave_id); + "Bond %u: member id %u distributing started.", + internals->port_id, member_id); } } else { if (!PARTNER_STATE(port, COLLECTING)) { @@ -538,8 +538,8 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id) SM_FLAG_SET(port, NTT); MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n"); RTE_BOND_LOG(INFO, - "Bond %u: slave id %u distributing stopped.", - internals->port_id, slave_id); + "Bond %u: member id %u distributing stopped.", + internals->port_id, member_id); } } } @@ -554,9 +554,9 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id) * @param port */ static void -tx_machine(struct bond_dev_private *internals, uint16_t slave_id) +tx_machine(struct bond_dev_private *internals, uint16_t member_id) { - struct port *agg, *port = &bond_mode_8023ad_ports[slave_id]; + struct port *agg, *port = &bond_mode_8023ad_ports[member_id]; struct rte_mbuf *lacp_pkt = NULL; struct lacpdu_header *hdr; @@ -587,7 +587,7 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id) /* Source and destination MAC */ rte_ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.dst_addr); - rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.src_addr); + rte_eth_macaddr_get(member_id, &hdr->eth_hdr.src_addr); hdr->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW); lacpdu = &hdr->lacpdu; @@ -635,10 +635,10 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id) return; } } else { - uint16_t pkts_sent = rte_eth_tx_prepare(slave_id, + uint16_t pkts_sent = rte_eth_tx_prepare(member_id, internals->mode4.dedicated_queues.tx_qid, &lacp_pkt, 1); - pkts_sent = rte_eth_tx_burst(slave_id, + pkts_sent = rte_eth_tx_burst(member_id, internals->mode4.dedicated_queues.tx_qid, &lacp_pkt, pkts_sent); if (pkts_sent != 1) { @@ -679,40 +679,40 @@ max_index(uint64_t *a, int n) * @param port_pos Port to assign. 
*/ static void -selection_logic(struct bond_dev_private *internals, uint16_t slave_id) +selection_logic(struct bond_dev_private *internals, uint16_t member_id) { struct port *agg, *port; - uint16_t slaves_count, new_agg_id, i, j = 0; - uint16_t *slaves; + uint16_t members_count, new_agg_id, i, j = 0; + uint16_t *members; uint64_t agg_bandwidth[RTE_MAX_ETHPORTS] = {0}; uint64_t agg_count[RTE_MAX_ETHPORTS] = {0}; - uint16_t default_slave = 0; + uint16_t default_member = 0; struct rte_eth_link link_info; uint16_t agg_new_idx = 0; int ret; - slaves = internals->active_slaves; - slaves_count = internals->active_slave_count; - port = &bond_mode_8023ad_ports[slave_id]; + members = internals->active_members; + members_count = internals->active_member_count; + port = &bond_mode_8023ad_ports[member_id]; /* Search for aggregator suitable for this port */ - for (i = 0; i < slaves_count; ++i) { - agg = &bond_mode_8023ad_ports[slaves[i]]; + for (i = 0; i < members_count; ++i) { + agg = &bond_mode_8023ad_ports[members[i]]; /* Skip ports that are not aggregators */ - if (agg->aggregator_port_id != slaves[i]) + if (agg->aggregator_port_id != members[i]) continue; - ret = rte_eth_link_get_nowait(slaves[i], &link_info); + ret = rte_eth_link_get_nowait(members[i], &link_info); if (ret < 0) { RTE_BOND_LOG(ERR, - "Slave (port %u) link get failed: %s\n", - slaves[i], rte_strerror(-ret)); + "Member (port %u) link get failed: %s\n", + members[i], rte_strerror(-ret)); continue; } agg_count[i] += 1; agg_bandwidth[i] += link_info.link_speed; - /* Actors system ID is not checked since all slave device have the same + /* Actors system ID is not checked since all member device have the same * ID (MAC address). */ if ((agg->actor.key == port->actor.key && agg->partner.system_priority == port->partner.system_priority && @@ -724,31 +724,31 @@ selection_logic(struct bond_dev_private *internals, uint16_t slave_id) rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) { if (j == 0) - default_slave = i; + default_member = i; j++; } } switch (internals->mode4.agg_selection) { case AGG_COUNT: - agg_new_idx = max_index(agg_count, slaves_count); - new_agg_id = slaves[agg_new_idx]; + agg_new_idx = max_index(agg_count, members_count); + new_agg_id = members[agg_new_idx]; break; case AGG_BANDWIDTH: - agg_new_idx = max_index(agg_bandwidth, slaves_count); - new_agg_id = slaves[agg_new_idx]; + agg_new_idx = max_index(agg_bandwidth, members_count); + new_agg_id = members[agg_new_idx]; break; case AGG_STABLE: - if (default_slave == slaves_count) - new_agg_id = slaves[slave_id]; + if (default_member == members_count) + new_agg_id = members[member_id]; else - new_agg_id = slaves[default_slave]; + new_agg_id = members[default_member]; break; default: - if (default_slave == slaves_count) - new_agg_id = slaves[slave_id]; + if (default_member == members_count) + new_agg_id = members[member_id]; else - new_agg_id = slaves[default_slave]; + new_agg_id = members[default_member]; break; } @@ -758,7 +758,7 @@ selection_logic(struct bond_dev_private *internals, uint16_t slave_id) MODE4_DEBUG("-> SELECTED: ID=%3u\n" "\t%s aggregator ID=%3u\n", port->aggregator_port_id, - port->aggregator_port_id == slave_id ? + port->aggregator_port_id == member_id ? 
"aggregator not found, using default" : "aggregator found", port->aggregator_port_id); } @@ -802,7 +802,7 @@ link_speed_key(uint16_t speed) { } static void -rx_machine_update(struct bond_dev_private *internals, uint16_t slave_id, +rx_machine_update(struct bond_dev_private *internals, uint16_t member_id, struct rte_mbuf *lacp_pkt) { struct lacpdu_header *lacp; struct lacpdu_actor_partner_params *partner; @@ -813,7 +813,7 @@ rx_machine_update(struct bond_dev_private *internals, uint16_t slave_id, RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP); partner = &lacp->lacpdu.partner; - port = &bond_mode_8023ad_ports[slave_id]; + port = &bond_mode_8023ad_ports[member_id]; agg = &bond_mode_8023ad_ports[port->aggregator_port_id]; if (rte_is_zero_ether_addr(&partner->port_params.system) || @@ -822,7 +822,7 @@ rx_machine_update(struct bond_dev_private *internals, uint16_t slave_id, /* This LACP frame is sending to the bonding port * so pass it to rx_machine. */ - rx_machine(internals, slave_id, &lacp->lacpdu); + rx_machine(internals, member_id, &lacp->lacpdu); } else { char preferred_system_name[RTE_ETHER_ADDR_FMT_SIZE]; char self_system_name[RTE_ETHER_ADDR_FMT_SIZE]; @@ -837,16 +837,16 @@ rx_machine_update(struct bond_dev_private *internals, uint16_t slave_id, } rte_pktmbuf_free(lacp_pkt); } else - rx_machine(internals, slave_id, NULL); + rx_machine(internals, member_id, NULL); } static void bond_mode_8023ad_dedicated_rxq_process(struct bond_dev_private *internals, - uint16_t slave_id) + uint16_t member_id) { #define DEDICATED_QUEUE_BURST_SIZE 32 struct rte_mbuf *lacp_pkt[DEDICATED_QUEUE_BURST_SIZE]; - uint16_t rx_count = rte_eth_rx_burst(slave_id, + uint16_t rx_count = rte_eth_rx_burst(member_id, internals->mode4.dedicated_queues.rx_qid, lacp_pkt, DEDICATED_QUEUE_BURST_SIZE); @@ -854,10 +854,10 @@ bond_mode_8023ad_dedicated_rxq_process(struct bond_dev_private *internals, uint16_t i; for (i = 0; i < rx_count; i++) - bond_mode_8023ad_handle_slow_pkt(internals, slave_id, + bond_mode_8023ad_handle_slow_pkt(internals, member_id, lacp_pkt[i]); } else { - rx_machine_update(internals, slave_id, NULL); + rx_machine_update(internals, member_id, NULL); } } @@ -868,23 +868,23 @@ bond_mode_8023ad_periodic_cb(void *arg) struct bond_dev_private *internals = bond_dev->data->dev_private; struct port *port; struct rte_eth_link link_info; - struct rte_ether_addr slave_addr; + struct rte_ether_addr member_addr; struct rte_mbuf *lacp_pkt = NULL; - uint16_t slave_id; + uint16_t member_id; uint16_t i; /* Update link status on each port */ - for (i = 0; i < internals->active_slave_count; i++) { + for (i = 0; i < internals->active_member_count; i++) { uint16_t key; int ret; - slave_id = internals->active_slaves[i]; - ret = rte_eth_link_get_nowait(slave_id, &link_info); + member_id = internals->active_members[i]; + ret = rte_eth_link_get_nowait(member_id, &link_info); if (ret < 0) { RTE_BOND_LOG(ERR, - "Slave (port %u) link get failed: %s\n", - slave_id, rte_strerror(-ret)); + "Member (port %u) link get failed: %s\n", + member_id, rte_strerror(-ret)); } if (ret >= 0 && link_info.link_status != 0) { @@ -895,8 +895,8 @@ bond_mode_8023ad_periodic_cb(void *arg) key = 0; } - rte_eth_macaddr_get(slave_id, &slave_addr); - port = &bond_mode_8023ad_ports[slave_id]; + rte_eth_macaddr_get(member_id, &member_addr); + port = &bond_mode_8023ad_ports[member_id]; key = rte_cpu_to_be_16(key); if (key != port->actor.key) { @@ -907,16 +907,16 @@ bond_mode_8023ad_periodic_cb(void *arg) SM_FLAG_SET(port, NTT); } - if 
(!rte_is_same_ether_addr(&port->actor.system, &slave_addr)) { - rte_ether_addr_copy(&slave_addr, &port->actor.system); - if (port->aggregator_port_id == slave_id) + if (!rte_is_same_ether_addr(&port->actor.system, &member_addr)) { + rte_ether_addr_copy(&member_addr, &port->actor.system); + if (port->aggregator_port_id == member_id) SM_FLAG_SET(port, NTT); } } - for (i = 0; i < internals->active_slave_count; i++) { - slave_id = internals->active_slaves[i]; - port = &bond_mode_8023ad_ports[slave_id]; + for (i = 0; i < internals->active_member_count; i++) { + member_id = internals->active_members[i]; + port = &bond_mode_8023ad_ports[member_id]; if ((port->actor.key & rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) { @@ -947,19 +947,19 @@ bond_mode_8023ad_periodic_cb(void *arg) if (retval != 0) lacp_pkt = NULL; - rx_machine_update(internals, slave_id, lacp_pkt); + rx_machine_update(internals, member_id, lacp_pkt); } else { bond_mode_8023ad_dedicated_rxq_process(internals, - slave_id); + member_id); } - periodic_machine(internals, slave_id); - mux_machine(internals, slave_id); - tx_machine(internals, slave_id); - selection_logic(internals, slave_id); + periodic_machine(internals, member_id); + mux_machine(internals, member_id); + tx_machine(internals, member_id); + selection_logic(internals, member_id); SM_FLAG_CLR(port, BEGIN); - show_warnings(slave_id); + show_warnings(member_id); } rte_eal_alarm_set(internals->mode4.update_timeout_us, @@ -967,34 +967,34 @@ bond_mode_8023ad_periodic_cb(void *arg) } static int -bond_mode_8023ad_register_lacp_mac(uint16_t slave_id) +bond_mode_8023ad_register_lacp_mac(uint16_t member_id) { int ret; - ret = rte_eth_allmulticast_enable(slave_id); + ret = rte_eth_allmulticast_enable(member_id); if (ret != 0) { RTE_BOND_LOG(ERR, "failed to enable allmulti mode for port %u: %s", - slave_id, rte_strerror(-ret)); + member_id, rte_strerror(-ret)); } - if (rte_eth_allmulticast_get(slave_id)) { + if (rte_eth_allmulticast_get(member_id)) { RTE_BOND_LOG(DEBUG, "forced allmulti for port %u", - slave_id); - bond_mode_8023ad_ports[slave_id].forced_rx_flags = + member_id); + bond_mode_8023ad_ports[member_id].forced_rx_flags = BOND_8023AD_FORCED_ALLMULTI; return 0; } - ret = rte_eth_promiscuous_enable(slave_id); + ret = rte_eth_promiscuous_enable(member_id); if (ret != 0) { RTE_BOND_LOG(ERR, "failed to enable promiscuous mode for port %u: %s", - slave_id, rte_strerror(-ret)); + member_id, rte_strerror(-ret)); } - if (rte_eth_promiscuous_get(slave_id)) { + if (rte_eth_promiscuous_get(member_id)) { RTE_BOND_LOG(DEBUG, "forced promiscuous for port %u", - slave_id); - bond_mode_8023ad_ports[slave_id].forced_rx_flags = + member_id); + bond_mode_8023ad_ports[member_id].forced_rx_flags = BOND_8023AD_FORCED_PROMISC; return 0; } @@ -1003,27 +1003,27 @@ bond_mode_8023ad_register_lacp_mac(uint16_t slave_id) } static void -bond_mode_8023ad_unregister_lacp_mac(uint16_t slave_id) +bond_mode_8023ad_unregister_lacp_mac(uint16_t member_id) { int ret; - switch (bond_mode_8023ad_ports[slave_id].forced_rx_flags) { + switch (bond_mode_8023ad_ports[member_id].forced_rx_flags) { case BOND_8023AD_FORCED_ALLMULTI: - RTE_BOND_LOG(DEBUG, "unset allmulti for port %u", slave_id); - ret = rte_eth_allmulticast_disable(slave_id); + RTE_BOND_LOG(DEBUG, "unset allmulti for port %u", member_id); + ret = rte_eth_allmulticast_disable(member_id); if (ret != 0) RTE_BOND_LOG(ERR, "failed to disable allmulti mode for port %u: %s", - slave_id, rte_strerror(-ret)); + member_id, rte_strerror(-ret)); break; case 
BOND_8023AD_FORCED_PROMISC: - RTE_BOND_LOG(DEBUG, "unset promisc for port %u", slave_id); - ret = rte_eth_promiscuous_disable(slave_id); + RTE_BOND_LOG(DEBUG, "unset promisc for port %u", member_id); + ret = rte_eth_promiscuous_disable(member_id); if (ret != 0) RTE_BOND_LOG(ERR, "failed to disable promiscuous mode for port %u: %s", - slave_id, rte_strerror(-ret)); + member_id, rte_strerror(-ret)); break; default: @@ -1032,12 +1032,12 @@ bond_mode_8023ad_unregister_lacp_mac(uint16_t slave_id) } void -bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, - uint16_t slave_id) +bond_mode_8023ad_activate_member(struct rte_eth_dev *bond_dev, + uint16_t member_id) { struct bond_dev_private *internals = bond_dev->data->dev_private; - struct port *port = &bond_mode_8023ad_ports[slave_id]; + struct port *port = &bond_mode_8023ad_ports[member_id]; struct port_params initial = { .system = { { 0 } }, .system_priority = rte_cpu_to_be_16(0xFFFF), @@ -1053,15 +1053,15 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, struct bond_tx_queue *bd_tx_q; uint16_t q_id; - /* Given slave mus not be in active list */ - RTE_ASSERT(find_slave_by_id(internals->active_slaves, - internals->active_slave_count, slave_id) == internals->active_slave_count); + /* Given member mus not be in active list */ + RTE_ASSERT(find_member_by_id(internals->active_members, + internals->active_member_count, member_id) == internals->active_member_count); RTE_SET_USED(internals); /* used only for assert when enabled */ memcpy(&port->actor, &initial, sizeof(struct port_params)); /* Standard requires that port ID must be grater than 0. * Add 1 do get corresponding port_number */ - port->actor.port_number = rte_cpu_to_be_16(slave_id + 1); + port->actor.port_number = rte_cpu_to_be_16(member_id + 1); memcpy(&port->partner, &initial, sizeof(struct port_params)); memcpy(&port->partner_admin, &initial, sizeof(struct port_params)); @@ -1072,11 +1072,11 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, port->sm_flags = SM_FLAGS_BEGIN; /* use this port as aggregator */ - port->aggregator_port_id = slave_id; + port->aggregator_port_id = member_id; - if (bond_mode_8023ad_register_lacp_mac(slave_id) < 0) { - RTE_BOND_LOG(WARNING, "slave %u is most likely broken and won't receive LACP packets", - slave_id); + if (bond_mode_8023ad_register_lacp_mac(member_id) < 0) { + RTE_BOND_LOG(WARNING, "member %u is most likely broken and won't receive LACP packets", + member_id); } timer_cancel(&port->warning_timer); @@ -1087,22 +1087,24 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, RTE_ASSERT(port->rx_ring == NULL); RTE_ASSERT(port->tx_ring == NULL); - socket_id = rte_eth_dev_socket_id(slave_id); + socket_id = rte_eth_dev_socket_id(member_id); if (socket_id == -1) socket_id = rte_socket_id(); element_size = sizeof(struct slow_protocol_frame) + RTE_PKTMBUF_HEADROOM; - /* The size of the mempool should be at least: - * the sum of the TX descriptors + BOND_MODE_8023AX_SLAVE_TX_PKTS */ - total_tx_desc = BOND_MODE_8023AX_SLAVE_TX_PKTS; + /* + * The size of the mempool should be at least: + * the sum of the TX descriptors + BOND_MODE_8023AX_MEMBER_TX_PKTS. 
+ */ + total_tx_desc = BOND_MODE_8023AX_MEMBER_TX_PKTS; for (q_id = 0; q_id < bond_dev->data->nb_tx_queues; q_id++) { bd_tx_q = (struct bond_tx_queue*)bond_dev->data->tx_queues[q_id]; total_tx_desc += bd_tx_q->nb_tx_desc; } - snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id); + snprintf(mem_name, RTE_DIM(mem_name), "member_port%u_pool", member_id); port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc, RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ? 32 : RTE_MEMPOOL_CACHE_MAX_SIZE, @@ -1111,39 +1113,39 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, /* Any memory allocation failure in initialization is critical because * resources can't be free, so reinitialization is impossible. */ if (port->mbuf_pool == NULL) { - rte_panic("Slave %u: Failed to create memory pool '%s': %s\n", - slave_id, mem_name, rte_strerror(rte_errno)); + rte_panic("Member %u: Failed to create memory pool '%s': %s\n", + member_id, mem_name, rte_strerror(rte_errno)); } - snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id); + snprintf(mem_name, RTE_DIM(mem_name), "member_%u_rx", member_id); port->rx_ring = rte_ring_create(mem_name, - rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0); + rte_align32pow2(BOND_MODE_8023AX_MEMBER_RX_PKTS), socket_id, 0); if (port->rx_ring == NULL) { - rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id, + rte_panic("Member %u: Failed to create rx ring '%s': %s\n", member_id, mem_name, rte_strerror(rte_errno)); } /* TX ring is at least one pkt longer to make room for marker packet. */ - snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id); + snprintf(mem_name, RTE_DIM(mem_name), "member_%u_tx", member_id); port->tx_ring = rte_ring_create(mem_name, - rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0); + rte_align32pow2(BOND_MODE_8023AX_MEMBER_TX_PKTS + 1), socket_id, 0); if (port->tx_ring == NULL) { - rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id, + rte_panic("Member %u: Failed to create tx ring '%s': %s\n", member_id, mem_name, rte_strerror(rte_errno)); } } int -bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev __rte_unused, - uint16_t slave_id) +bond_mode_8023ad_deactivate_member(struct rte_eth_dev *bond_dev __rte_unused, + uint16_t member_id) { void *pkt = NULL; struct port *port = NULL; uint8_t old_partner_state; - port = &bond_mode_8023ad_ports[slave_id]; + port = &bond_mode_8023ad_ports[member_id]; ACTOR_STATE_CLR(port, AGGREGATION); port->selected = UNSELECTED; @@ -1151,7 +1153,7 @@ bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev __rte_unused, old_partner_state = port->partner_state; record_default(port); - bond_mode_8023ad_unregister_lacp_mac(slave_id); + bond_mode_8023ad_unregister_lacp_mac(member_id); /* If partner timeout state changes then disable timer */ if (!((old_partner_state ^ port->partner_state) & @@ -1174,30 +1176,30 @@ void bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev) { struct bond_dev_private *internals = bond_dev->data->dev_private; - struct rte_ether_addr slave_addr; - struct port *slave, *agg_slave; - uint16_t slave_id, i, j; + struct rte_ether_addr member_addr; + struct port *member, *agg_member; + uint16_t member_id, i, j; bond_mode_8023ad_stop(bond_dev); - for (i = 0; i < internals->active_slave_count; i++) { - slave_id = internals->active_slaves[i]; - slave = &bond_mode_8023ad_ports[slave_id]; - rte_eth_macaddr_get(slave_id, &slave_addr); + for (i = 0; i < internals->active_member_count; i++) { + 
member_id = internals->active_members[i]; + member = &bond_mode_8023ad_ports[member_id]; + rte_eth_macaddr_get(member_id, &member_addr); - if (rte_is_same_ether_addr(&slave_addr, &slave->actor.system)) + if (rte_is_same_ether_addr(&member_addr, &member->actor.system)) continue; - rte_ether_addr_copy(&slave_addr, &slave->actor.system); + rte_ether_addr_copy(&member_addr, &member->actor.system); /* Do nothing if this port is not an aggregator. In other case * Set NTT flag on every port that use this aggregator. */ - if (slave->aggregator_port_id != slave_id) + if (member->aggregator_port_id != member_id) continue; - for (j = 0; j < internals->active_slave_count; j++) { - agg_slave = &bond_mode_8023ad_ports[internals->active_slaves[j]]; - if (agg_slave->aggregator_port_id == slave_id) - SM_FLAG_SET(agg_slave, NTT); + for (j = 0; j < internals->active_member_count; j++) { + agg_member = &bond_mode_8023ad_ports[internals->active_members[j]]; + if (agg_member->aggregator_port_id == member_id) + SM_FLAG_SET(agg_member, NTT); } } @@ -1288,9 +1290,9 @@ bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev) struct bond_dev_private *internals = bond_dev->data->dev_private; uint16_t i; - for (i = 0; i < internals->active_slave_count; i++) - bond_mode_8023ad_activate_slave(bond_dev, - internals->active_slaves[i]); + for (i = 0; i < internals->active_member_count; i++) + bond_mode_8023ad_activate_member(bond_dev, + internals->active_members[i]); return 0; } @@ -1326,10 +1328,10 @@ bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev) void bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals, - uint16_t slave_id, struct rte_mbuf *pkt) + uint16_t member_id, struct rte_mbuf *pkt) { struct mode8023ad_private *mode4 = &internals->mode4; - struct port *port = &bond_mode_8023ad_ports[slave_id]; + struct port *port = &bond_mode_8023ad_ports[member_id]; struct marker_header *m_hdr; uint64_t marker_timer, old_marker_timer; int retval; @@ -1362,7 +1364,7 @@ bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals, } while (unlikely(retval == 0)); m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP; - rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.src_addr); + rte_eth_macaddr_get(member_id, &m_hdr->eth_hdr.src_addr); if (internals->mode4.dedicated_queues.enabled == 0) { if (rte_ring_enqueue(port->tx_ring, pkt) != 0) { @@ -1373,10 +1375,10 @@ bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals, } } else { /* Send packet directly to the slow queue */ - uint16_t tx_count = rte_eth_tx_prepare(slave_id, + uint16_t tx_count = rte_eth_tx_prepare(member_id, internals->mode4.dedicated_queues.tx_qid, &pkt, 1); - tx_count = rte_eth_tx_burst(slave_id, + tx_count = rte_eth_tx_burst(member_id, internals->mode4.dedicated_queues.tx_qid, &pkt, tx_count); if (tx_count != 1) { @@ -1394,7 +1396,7 @@ bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals, goto free_out; } } else - rx_machine_update(internals, slave_id, pkt); + rx_machine_update(internals, member_id, pkt); } else { wrn = WRN_UNKNOWN_SLOW_TYPE; goto free_out; @@ -1413,7 +1415,7 @@ rte_eth_bond_8023ad_conf_get(uint16_t port_id, { struct rte_eth_dev *bond_dev; - if (valid_bonded_port_id(port_id) != 0) + if (valid_bonding_port_id(port_id) != 0) return -EINVAL; if (conf == NULL) @@ -1432,7 +1434,7 @@ rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id, struct bond_dev_private *internals; struct mode8023ad_private *mode4; - if (valid_bonded_port_id(port_id) != 0) + if (valid_bonding_port_id(port_id) != 0) return -EINVAL; 
bond_dev = &rte_eth_devices[port_id]; @@ -1454,7 +1456,7 @@ int rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id) struct bond_dev_private *internals; struct mode8023ad_private *mode4; - if (valid_bonded_port_id(port_id) != 0) + if (valid_bonding_port_id(port_id) != 0) return -EINVAL; bond_dev = &rte_eth_devices[port_id]; @@ -1473,7 +1475,7 @@ static int bond_8023ad_setup_validate(uint16_t port_id, struct rte_eth_bond_8023ad_conf *conf) { - if (valid_bonded_port_id(port_id) != 0) + if (valid_bonding_port_id(port_id) != 0) return -EINVAL; if (conf != NULL) { @@ -1517,26 +1519,26 @@ rte_eth_bond_8023ad_setup(uint16_t port_id, int -rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id, - struct rte_eth_bond_8023ad_slave_info *info) +rte_eth_bond_8023ad_member_info(uint16_t port_id, uint16_t member_id, + struct rte_eth_bond_8023ad_member_info *info) { struct rte_eth_dev *bond_dev; struct bond_dev_private *internals; struct port *port; - if (info == NULL || valid_bonded_port_id(port_id) != 0 || + if (info == NULL || valid_bonding_port_id(port_id) != 0 || rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD) return -EINVAL; bond_dev = &rte_eth_devices[port_id]; internals = bond_dev->data->dev_private; - if (find_slave_by_id(internals->active_slaves, - internals->active_slave_count, slave_id) == - internals->active_slave_count) + if (find_member_by_id(internals->active_members, + internals->active_member_count, member_id) == + internals->active_member_count) return -EINVAL; - port = &bond_mode_8023ad_ports[slave_id]; + port = &bond_mode_8023ad_ports[member_id]; info->selected = port->selected; info->actor_state = port->actor_state; @@ -1550,7 +1552,7 @@ rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id, } static int -bond_8023ad_ext_validate(uint16_t port_id, uint16_t slave_id) +bond_8023ad_ext_validate(uint16_t port_id, uint16_t member_id) { struct rte_eth_dev *bond_dev; struct bond_dev_private *internals; @@ -1565,9 +1567,9 @@ bond_8023ad_ext_validate(uint16_t port_id, uint16_t slave_id) return -EINVAL; internals = bond_dev->data->dev_private; - if (find_slave_by_id(internals->active_slaves, - internals->active_slave_count, slave_id) == - internals->active_slave_count) + if (find_member_by_id(internals->active_members, + internals->active_member_count, member_id) == + internals->active_member_count) return -EINVAL; mode4 = &internals->mode4; @@ -1578,17 +1580,17 @@ bond_8023ad_ext_validate(uint16_t port_id, uint16_t slave_id) } int -rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id, +rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t member_id, int enabled) { struct port *port; int res; - res = bond_8023ad_ext_validate(port_id, slave_id); + res = bond_8023ad_ext_validate(port_id, member_id); if (res != 0) return res; - port = &bond_mode_8023ad_ports[slave_id]; + port = &bond_mode_8023ad_ports[member_id]; if (enabled) ACTOR_STATE_SET(port, COLLECTING); @@ -1599,17 +1601,17 @@ rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id, } int -rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id, +rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t member_id, int enabled) { struct port *port; int res; - res = bond_8023ad_ext_validate(port_id, slave_id); + res = bond_8023ad_ext_validate(port_id, member_id); if (res != 0) return res; - port = &bond_mode_8023ad_ports[slave_id]; + port = &bond_mode_8023ad_ports[member_id]; if (enabled) ACTOR_STATE_SET(port, DISTRIBUTING); @@ -1620,45 +1622,45 @@ 
rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id, } int -rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id) +rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t member_id) { struct port *port; int err; - err = bond_8023ad_ext_validate(port_id, slave_id); + err = bond_8023ad_ext_validate(port_id, member_id); if (err != 0) return err; - port = &bond_mode_8023ad_ports[slave_id]; + port = &bond_mode_8023ad_ports[member_id]; return ACTOR_STATE(port, DISTRIBUTING); } int -rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id) +rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t member_id) { struct port *port; int err; - err = bond_8023ad_ext_validate(port_id, slave_id); + err = bond_8023ad_ext_validate(port_id, member_id); if (err != 0) return err; - port = &bond_mode_8023ad_ports[slave_id]; + port = &bond_mode_8023ad_ports[member_id]; return ACTOR_STATE(port, COLLECTING); } int -rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id, +rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t member_id, struct rte_mbuf *lacp_pkt) { struct port *port; int res; - res = bond_8023ad_ext_validate(port_id, slave_id); + res = bond_8023ad_ext_validate(port_id, member_id); if (res != 0) return res; - port = &bond_mode_8023ad_ports[slave_id]; + port = &bond_mode_8023ad_ports[member_id]; if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header)) return -EINVAL; @@ -1683,11 +1685,11 @@ bond_mode_8023ad_ext_periodic_cb(void *arg) struct mode8023ad_private *mode4 = &internals->mode4; struct port *port; void *pkt = NULL; - uint16_t i, slave_id; + uint16_t i, member_id; - for (i = 0; i < internals->active_slave_count; i++) { - slave_id = internals->active_slaves[i]; - port = &bond_mode_8023ad_ports[slave_id]; + for (i = 0; i < internals->active_member_count; i++) { + member_id = internals->active_members[i]; + port = &bond_mode_8023ad_ports[member_id]; if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) { struct rte_mbuf *lacp_pkt = pkt; @@ -1700,7 +1702,7 @@ bond_mode_8023ad_ext_periodic_cb(void *arg) /* This is LACP frame so pass it to rx callback. * Callback is responsible for freeing mbuf. 
*/ - mode4->slowrx_cb(slave_id, lacp_pkt); + mode4->slowrx_cb(member_id, lacp_pkt); } } @@ -1715,7 +1717,7 @@ rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port) struct rte_eth_dev *dev; struct bond_dev_private *internals; - if (valid_bonded_port_id(port) != 0) + if (valid_bonding_port_id(port) != 0) return -EINVAL; dev = &rte_eth_devices[port]; @@ -1741,7 +1743,7 @@ rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port) struct rte_eth_dev *dev; struct bond_dev_private *internals; - if (valid_bonded_port_id(port) != 0) + if (valid_bonding_port_id(port) != 0) return -EINVAL; dev = &rte_eth_devices[port]; diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h b/drivers/net/bonding/rte_eth_bond_8023ad.h index 7ad8d6d00bd..4c280c75654 100644 --- a/drivers/net/bonding/rte_eth_bond_8023ad.h +++ b/drivers/net/bonding/rte_eth_bond_8023ad.h @@ -35,7 +35,7 @@ extern "C" { #define MARKER_TLV_TYPE_INFO 0x01 #define MARKER_TLV_TYPE_RESP 0x02 -typedef void (*rte_eth_bond_8023ad_ext_slowrx_fn)(uint16_t slave_id, +typedef void (*rte_eth_bond_8023ad_ext_slowrx_fn)(uint16_t member_id, struct rte_mbuf *lacp_pkt); enum rte_bond_8023ad_selection { @@ -66,13 +66,13 @@ struct port_params { uint16_t system_priority; /**< System priority (unused in current implementation) */ struct rte_ether_addr system; - /**< System ID - Slave MAC address, same as bonding MAC address */ + /**< System ID - Member MAC address, same as bonding MAC address */ uint16_t key; /**< Speed information (implementation dependent) and duplex. */ uint16_t port_priority; /**< Priority of this (unused in current implementation) */ uint16_t port_number; - /**< Port number. It corresponds to slave port id. */ + /**< Port number. It corresponds to member port id. */ } __rte_packed __rte_aligned(2); struct lacpdu_actor_partner_params { @@ -141,7 +141,7 @@ struct rte_eth_bond_8023ad_conf { enum rte_bond_8023ad_agg_selection agg_selection; }; -struct rte_eth_bond_8023ad_slave_info { +struct rte_eth_bond_8023ad_member_info { enum rte_bond_8023ad_selection selected; uint8_t actor_state; struct port_params actor; @@ -184,104 +184,101 @@ rte_eth_bond_8023ad_setup(uint16_t port_id, /** * @internal * - * Function returns current state of given slave device. + * Function returns current state of given member device. * - * @param slave_id Port id of valid slave. + * @param member_id Port id of valid member. * @param conf buffer for configuration * @return * 0 - if ok - * -EINVAL if conf is NULL or slave id is invalid (not a slave of given - * bonded device or is not inactive). + * -EINVAL if conf is NULL or member id is invalid (not a member of given + * bonding device or is not inactive). */ +__rte_experimental int -rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id, - struct rte_eth_bond_8023ad_slave_info *conf); - -#ifdef __cplusplus -} -#endif +rte_eth_bond_8023ad_member_info(uint16_t port_id, uint16_t member_id, + struct rte_eth_bond_8023ad_member_info *conf); /** - * Configure a slave port to start collecting. + * Configure a member port to start collecting. * * @param port_id Bonding device id - * @param slave_id Port id of valid slave. + * @param member_id Port id of valid member. * @param enabled Non-zero when collection enabled. * @return * 0 - if ok - * -EINVAL if slave is not valid. + * -EINVAL if member is not valid. 
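For reference, the renamed query call is used exactly like the old _slave_info() variant. A small sketch with hypothetical port ids, touching only the fields visible in this patch (selected, actor_state):

#include <stdio.h>
#include <rte_eth_bond_8023ad.h>

/* Sketch: print the 802.3ad state of one member of a mode-4 bonding port. */
static void
print_member_lacp_state(uint16_t bonding_port, uint16_t member_port)
{
	struct rte_eth_bond_8023ad_member_info info;

	if (rte_eth_bond_8023ad_member_info(bonding_port, member_port,
			&info) != 0)
		return;	/* not an active member of a mode-4 bonding port */

	printf("member %u: selected=%d actor_state=0x%02x\n",
			member_port, (int)info.selected, info.actor_state);
}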
*/ int -rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id, +rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t member_id, int enabled); /** - * Get COLLECTING flag from slave port actor state. + * Get COLLECTING flag from member port actor state. * * @param port_id Bonding device id - * @param slave_id Port id of valid slave. + * @param member_id Port id of valid member. * @return * 0 - if not set * 1 - if set - * -EINVAL if slave is not valid. + * -EINVAL if member is not valid. */ int -rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id); +rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t member_id); /** - * Configure a slave port to start distributing. + * Configure a member port to start distributing. * * @param port_id Bonding device id - * @param slave_id Port id of valid slave. + * @param member_id Port id of valid member. * @param enabled Non-zero when distribution enabled. * @return * 0 - if ok - * -EINVAL if slave is not valid. + * -EINVAL if member is not valid. */ int -rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id, +rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t member_id, int enabled); /** - * Get DISTRIBUTING flag from slave port actor state. + * Get DISTRIBUTING flag from member port actor state. * * @param port_id Bonding device id - * @param slave_id Port id of valid slave. + * @param member_id Port id of valid member. * @return * 0 - if not set * 1 - if set - * -EINVAL if slave is not valid. + * -EINVAL if member is not valid. */ int -rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id); +rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t member_id); /** * LACPDU transmit path for external 802.3ad state machine. Caller retains * ownership of the packet on failure. * * @param port_id Bonding device id - * @param slave_id Port ID of valid slave device. + * @param member_id Port ID of valid member device. * @param lacp_pkt mbuf containing LACPDU. * * @return * 0 on success, negative value otherwise. */ int -rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id, +rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t member_id, struct rte_mbuf *lacp_pkt); /** - * Enable dedicated hw queues for 802.3ad control plane traffic on slaves + * Enable dedicated hw queues for 802.3ad control plane traffic on members * - * This function creates an additional tx and rx queue on each slave for + * This function creates an additional tx and rx queue on each member for * dedicated 802.3ad control plane traffic . A flow filtering rule is - * programmed on each slave to redirect all LACP slow packets to that rx queue + * programmed on each member to redirect all LACP slow packets to that rx queue * for processing in the LACP state machine, this removes the need to filter - * these packets in the bonded devices data path. The additional tx queue is + * these packets in the bonding devices data path. The additional tx queue is * used to enable the LACP state machine to enqueue LACP packets directly to - * slave hw independently of the bonded devices data path. + * member hw independently of the bonding devices data path. * - * To use this feature all slaves must support the programming of the flow + * To use this feature all members must support the programming of the flow * filter rule required for rx and have enough queues that one rx and tx queue * can be reserved for the LACP state machines control packets. 
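The ext_* control path is likewise a pure rename. A sketch of switching one member to collecting and distributing when the 802.3ad state machine runs outside the PMD; it assumes the bonding port was set up for the external state machine (an ext slow-rx callback registered, which the driver's bond_8023ad_ext_validate() is understood to require), and the port ids are illustrative:

#include <rte_eth_bond_8023ad.h>

static int
member_enable_forwarding(uint16_t bonding_port, uint16_t member_port)
{
	int ret;

	ret = rte_eth_bond_8023ad_ext_collect(bonding_port, member_port, 1);
	if (ret != 0)
		return ret;

	ret = rte_eth_bond_8023ad_ext_distrib(bonding_port, member_port, 1);
	if (ret != 0)
		return ret;

	/* Both getters report 1 once the flags are set in the actor state. */
	if (rte_eth_bond_8023ad_ext_collect_get(bonding_port, member_port) != 1)
		return -1;
	if (rte_eth_bond_8023ad_ext_distrib_get(bonding_port, member_port) != 1)
		return -1;
	return 0;
}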
* @@ -296,7 +293,7 @@ int rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port_id); /** - * Disable slow queue on slaves + * Disable slow queue on members * * This function disables hardware slow packet filter. * @@ -331,4 +328,9 @@ rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id); int rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id, enum rte_bond_8023ad_agg_selection agg_selection); + +#ifdef __cplusplus +} +#endif + #endif /* RTE_ETH_BOND_8023AD_H_ */ diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c index 86335a79713..56945e23496 100644 --- a/drivers/net/bonding/rte_eth_bond_alb.c +++ b/drivers/net/bonding/rte_eth_bond_alb.c @@ -19,13 +19,13 @@ simple_hash(uint8_t *hash_start, int hash_size) } static uint16_t -calculate_slave(struct bond_dev_private *internals) +calculate_member(struct bond_dev_private *internals) { uint16_t idx; - idx = (internals->mode6.last_slave + 1) % internals->active_slave_count; - internals->mode6.last_slave = idx; - return internals->active_slaves[idx]; + idx = (internals->mode6.last_member + 1) % internals->active_member_count; + internals->mode6.last_member = idx; + return internals->active_members[idx]; } int @@ -41,7 +41,7 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev) /* Fill hash table with initial values */ memset(hash_table, 0, sizeof(struct client_data) * ALB_HASH_TABLE_SIZE); rte_spinlock_init(&internals->mode6.lock); - internals->mode6.last_slave = ALB_NULL_INDEX; + internals->mode6.last_member = ALB_NULL_INDEX; internals->mode6.ntt = 0; /* Initialize memory pool for ARP packets to send */ @@ -96,7 +96,7 @@ void bond_mode_alb_arp_recv(struct rte_ether_hdr *eth_h, uint16_t offset, /* * We got reply for ARP Request send by the application. We need to * update client table when received data differ from what is stored - * in ALB table and issue sending update packet to that slave. + * in ALB table and issue sending update packet to that member. 
*/ rte_spinlock_lock(&internals->mode6.lock); if (client_info->in_use == 0 || @@ -112,8 +112,8 @@ void bond_mode_alb_arp_recv(struct rte_ether_hdr *eth_h, uint16_t offset, client_info->cli_ip = arp->arp_data.arp_sip; rte_ether_addr_copy(&arp->arp_data.arp_sha, &client_info->cli_mac); - client_info->slave_idx = calculate_slave(internals); - rte_eth_macaddr_get(client_info->slave_idx, + client_info->member_idx = calculate_member(internals); + rte_eth_macaddr_get(client_info->member_idx, &client_info->app_mac); rte_ether_addr_copy(&client_info->app_mac, &arp->arp_data.arp_tha); @@ -166,33 +166,33 @@ bond_mode_alb_arp_xmit(struct rte_ether_hdr *eth_h, uint16_t offset, &arp->arp_data.arp_tha, &client_info->cli_mac); } - rte_eth_macaddr_get(client_info->slave_idx, + rte_eth_macaddr_get(client_info->member_idx, &client_info->app_mac); rte_ether_addr_copy(&client_info->app_mac, &arp->arp_data.arp_sha); memcpy(client_info->vlan, eth_h + 1, offset); client_info->vlan_count = offset / sizeof(struct rte_vlan_hdr); rte_spinlock_unlock(&internals->mode6.lock); - return client_info->slave_idx; + return client_info->member_idx; } } - /* Assign new slave to this client and update src mac in ARP */ + /* Assign new member to this client and update src mac in ARP */ client_info->in_use = 1; client_info->ntt = 0; client_info->app_ip = arp->arp_data.arp_sip; rte_ether_addr_copy(&arp->arp_data.arp_tha, &client_info->cli_mac); client_info->cli_ip = arp->arp_data.arp_tip; - client_info->slave_idx = calculate_slave(internals); - rte_eth_macaddr_get(client_info->slave_idx, + client_info->member_idx = calculate_member(internals); + rte_eth_macaddr_get(client_info->member_idx, &client_info->app_mac); rte_ether_addr_copy(&client_info->app_mac, &arp->arp_data.arp_sha); memcpy(client_info->vlan, eth_h + 1, offset); client_info->vlan_count = offset / sizeof(struct rte_vlan_hdr); rte_spinlock_unlock(&internals->mode6.lock); - return client_info->slave_idx; + return client_info->member_idx; } /* If packet is not ARP Reply, send it on current primary port. 
*/ @@ -208,7 +208,7 @@ bond_mode_alb_arp_upd(struct client_data *client_info, { struct rte_ether_hdr *eth_h; struct rte_arp_hdr *arp_h; - uint16_t slave_idx; + uint16_t member_idx; rte_spinlock_lock(&internals->mode6.lock); eth_h = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); @@ -238,10 +238,10 @@ bond_mode_alb_arp_upd(struct client_data *client_info, arp_h->arp_plen = sizeof(uint32_t); arp_h->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY); - slave_idx = client_info->slave_idx; + member_idx = client_info->member_idx; rte_spinlock_unlock(&internals->mode6.lock); - return slave_idx; + return member_idx; } void @@ -252,18 +252,18 @@ bond_mode_alb_client_list_upd(struct rte_eth_dev *bond_dev) int i; - /* If active slave count is 0, it's pointless to refresh alb table */ - if (internals->active_slave_count <= 0) + /* If active member count is 0, it's pointless to refresh alb table */ + if (internals->active_member_count <= 0) return; rte_spinlock_lock(&internals->mode6.lock); - internals->mode6.last_slave = ALB_NULL_INDEX; + internals->mode6.last_member = ALB_NULL_INDEX; for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) { client_info = &internals->mode6.client_table[i]; if (client_info->in_use) { - client_info->slave_idx = calculate_slave(internals); - rte_eth_macaddr_get(client_info->slave_idx, &client_info->app_mac); + client_info->member_idx = calculate_member(internals); + rte_eth_macaddr_get(client_info->member_idx, &client_info->app_mac); internals->mode6.ntt = 1; } } diff --git a/drivers/net/bonding/rte_eth_bond_alb.h b/drivers/net/bonding/rte_eth_bond_alb.h index 4e9aeda9bc8..f51c355d369 100644 --- a/drivers/net/bonding/rte_eth_bond_alb.h +++ b/drivers/net/bonding/rte_eth_bond_alb.h @@ -22,8 +22,8 @@ struct client_data { uint32_t cli_ip; /**< Client IP address */ - uint16_t slave_idx; - /**< Index of slave on which we connect with that client */ + uint16_t member_idx; + /**< Index of member on which we connect with that client */ uint8_t in_use; /**< Flag indicating if entry in client table is currently used */ uint8_t ntt; @@ -42,8 +42,8 @@ struct mode_alb_private { /**< Mempool for creating ARP update packets */ uint8_t ntt; /**< Flag indicating if we need to send update to any client on next tx */ - uint32_t last_slave; - /**< Index of last used slave in client table */ + uint32_t last_member; + /**< Index of last used member in client table */ rte_spinlock_t lock; }; @@ -72,9 +72,9 @@ bond_mode_alb_arp_recv(struct rte_ether_hdr *eth_h, uint16_t offset, struct bond_dev_private *internals); /** - * Function handles ARP packet transmission. It also decides on which slave - * send that packet. If packet is ARP Request, it is send on primary slave. - * If it is ARP Reply, it is send on slave stored in client table for that + * Function handles ARP packet transmission. It also decides on which member + * send that packet. If packet is ARP Request, it is send on primary member. + * If it is ARP Reply, it is send on member stored in client table for that * connection. On Reply function also updates data in client table. * * @param eth_h ETH header of transmitted packet. @@ -82,7 +82,7 @@ bond_mode_alb_arp_recv(struct rte_ether_hdr *eth_h, uint16_t offset, * @param internals Bonding data. * * @return - * Index of slave on which packet should be sent. + * Index of member on which packet should be sent. 
*/ uint16_t bond_mode_alb_arp_xmit(struct rte_ether_hdr *eth_h, uint16_t offset, @@ -96,16 +96,16 @@ bond_mode_alb_arp_xmit(struct rte_ether_hdr *eth_h, uint16_t offset, * @param internals Bonding data. * * @return - * Index of slave on which packet should be sent. + * Index of member on which packet should be sent. */ uint16_t bond_mode_alb_arp_upd(struct client_data *client_info, struct rte_mbuf *pkt, struct bond_dev_private *internals); /** - * Function updates slave indexes of active connections. + * Function updates member indexes of active connections. * - * @param bond_dev Pointer to bonded device struct. + * @param bond_dev Pointer to bonding device struct. */ void bond_mode_alb_client_list_upd(struct rte_eth_dev *bond_dev); diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c index 8b6cdce34a5..99e496556aa 100644 --- a/drivers/net/bonding/rte_eth_bond_api.c +++ b/drivers/net/bonding/rte_eth_bond_api.c @@ -16,7 +16,7 @@ #include "eth_bond_8023ad_private.h" int -check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev) +check_for_bonding_ethdev(const struct rte_eth_dev *eth_dev) { /* Check valid pointer */ if (eth_dev == NULL || @@ -30,48 +30,48 @@ check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev) } int -valid_bonded_port_id(uint16_t port_id) +valid_bonding_port_id(uint16_t port_id) { RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); - return check_for_bonded_ethdev(&rte_eth_devices[port_id]); + return check_for_bonding_ethdev(&rte_eth_devices[port_id]); } int -check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev) +check_for_main_bonding_ethdev(const struct rte_eth_dev *eth_dev) { int i; struct bond_dev_private *internals; - if (check_for_bonded_ethdev(eth_dev) != 0) + if (check_for_bonding_ethdev(eth_dev) != 0) return 0; internals = eth_dev->data->dev_private; - /* Check if any of slave devices is a bonded device */ - for (i = 0; i < internals->slave_count; i++) - if (valid_bonded_port_id(internals->slaves[i].port_id) == 0) + /* Check if any of member devices is a bonding device */ + for (i = 0; i < internals->member_count; i++) + if (valid_bonding_port_id(internals->members[i].port_id) == 0) return 1; return 0; } int -valid_slave_port_id(struct bond_dev_private *internals, uint16_t slave_port_id) +valid_member_port_id(struct bond_dev_private *internals, uint16_t member_port_id) { - RTE_ETH_VALID_PORTID_OR_ERR_RET(slave_port_id, -1); + RTE_ETH_VALID_PORTID_OR_ERR_RET(member_port_id, -1); - /* Verify that slave_port_id refers to a non bonded port */ - if (check_for_bonded_ethdev(&rte_eth_devices[slave_port_id]) == 0 && + /* Verify that member_port_id refers to a non bonding port */ + if (check_for_bonding_ethdev(&rte_eth_devices[member_port_id]) == 0 && internals->mode == BONDING_MODE_8023AD) { - RTE_BOND_LOG(ERR, "Cannot add slave to bonded device in 802.3ad" - " mode as slave is also a bonded device, only " + RTE_BOND_LOG(ERR, "Cannot add member to bonding device in 802.3ad" + " mode as member is also a bonding device, only " "physical devices can be support in this mode."); return -1; } - if (internals->port_id == slave_port_id) { + if (internals->port_id == member_port_id) { RTE_BOND_LOG(ERR, - "Cannot add the bonded device itself as its slave."); + "Cannot add the bonding device itself as its member."); return -1; } @@ -79,61 +79,63 @@ valid_slave_port_id(struct bond_dev_private *internals, uint16_t slave_port_id) } void -activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id) +activate_member(struct rte_eth_dev 
*eth_dev, uint16_t port_id) { struct bond_dev_private *internals = eth_dev->data->dev_private; - uint16_t active_count = internals->active_slave_count; + uint16_t active_count = internals->active_member_count; if (internals->mode == BONDING_MODE_8023AD) - bond_mode_8023ad_activate_slave(eth_dev, port_id); + bond_mode_8023ad_activate_member(eth_dev, port_id); if (internals->mode == BONDING_MODE_TLB || internals->mode == BONDING_MODE_ALB) { - internals->tlb_slaves_order[active_count] = port_id; + internals->tlb_members_order[active_count] = port_id; } - RTE_ASSERT(internals->active_slave_count < - (RTE_DIM(internals->active_slaves) - 1)); + RTE_ASSERT(internals->active_member_count < + (RTE_DIM(internals->active_members) - 1)); - internals->active_slaves[internals->active_slave_count] = port_id; - internals->active_slave_count++; + internals->active_members[internals->active_member_count] = port_id; + internals->active_member_count++; if (internals->mode == BONDING_MODE_TLB) - bond_tlb_activate_slave(internals); + bond_tlb_activate_member(internals); if (internals->mode == BONDING_MODE_ALB) bond_mode_alb_client_list_upd(eth_dev); } void -deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id) +deactivate_member(struct rte_eth_dev *eth_dev, uint16_t port_id) { - uint16_t slave_pos; + uint16_t member_pos; struct bond_dev_private *internals = eth_dev->data->dev_private; - uint16_t active_count = internals->active_slave_count; + uint16_t active_count = internals->active_member_count; if (internals->mode == BONDING_MODE_8023AD) { bond_mode_8023ad_stop(eth_dev); - bond_mode_8023ad_deactivate_slave(eth_dev, port_id); + bond_mode_8023ad_deactivate_member(eth_dev, port_id); } else if (internals->mode == BONDING_MODE_TLB || internals->mode == BONDING_MODE_ALB) bond_tlb_disable(internals); - slave_pos = find_slave_by_id(internals->active_slaves, active_count, + member_pos = find_member_by_id(internals->active_members, active_count, port_id); - /* If slave was not at the end of the list - * shift active slaves up active array list */ - if (slave_pos < active_count) { + /* + * If member was not at the end of the list + * shift active members up active array list. 
+ */ + if (member_pos < active_count) { active_count--; - memmove(internals->active_slaves + slave_pos, - internals->active_slaves + slave_pos + 1, - (active_count - slave_pos) * - sizeof(internals->active_slaves[0])); + memmove(internals->active_members + member_pos, + internals->active_members + member_pos + 1, + (active_count - member_pos) * + sizeof(internals->active_members[0])); } - RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves)); - internals->active_slave_count = active_count; + RTE_ASSERT(active_count < RTE_DIM(internals->active_members)); + internals->active_member_count = active_count; if (eth_dev->data->dev_started) { if (internals->mode == BONDING_MODE_8023AD) { @@ -192,9 +194,9 @@ rte_eth_bond_free(const char *name) } static int -slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id) +member_vlan_filter_set(uint16_t bonding_port_id, uint16_t member_port_id) { - struct rte_eth_dev *bonded_eth_dev; + struct rte_eth_dev *bonding_eth_dev; struct bond_dev_private *internals; int found; int res = 0; @@ -202,12 +204,12 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id) uint32_t pos = 0; uint16_t first; - bonded_eth_dev = &rte_eth_devices[bonded_port_id]; - if ((bonded_eth_dev->data->dev_conf.rxmode.offloads & + bonding_eth_dev = &rte_eth_devices[bonding_port_id]; + if ((bonding_eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) == 0) return 0; - internals = bonded_eth_dev->data->dev_private; + internals = bonding_eth_dev->data->dev_private; found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab); first = pos; @@ -224,7 +226,7 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id) if (unlikely(slab & mask)) { uint16_t vlan_id = pos + i; - res = rte_eth_dev_vlan_filter(slave_port_id, + res = rte_eth_dev_vlan_filter(member_port_id, vlan_id, 1); } } @@ -236,45 +238,45 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id) } static int -slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals) +member_rte_flow_prepare(uint16_t member_id, struct bond_dev_private *internals) { struct rte_flow *flow; struct rte_flow_error ferror; - uint16_t slave_port_id = internals->slaves[slave_id].port_id; + uint16_t member_port_id = internals->members[member_id].port_id; if (internals->flow_isolated_valid != 0) { - if (rte_eth_dev_stop(slave_port_id) != 0) { + if (rte_eth_dev_stop(member_port_id) != 0) { RTE_BOND_LOG(ERR, "Failed to stop device on port %u", - slave_port_id); + member_port_id); return -1; } - if (rte_flow_isolate(slave_port_id, internals->flow_isolated, + if (rte_flow_isolate(member_port_id, internals->flow_isolated, &ferror)) { - RTE_BOND_LOG(ERR, "rte_flow_isolate failed for slave" - " %d: %s", slave_id, ferror.message ? + RTE_BOND_LOG(ERR, "rte_flow_isolate failed for member" + " %d: %s", member_id, ferror.message ? ferror.message : "(no stated reason)"); return -1; } } TAILQ_FOREACH(flow, &internals->flow_list, next) { - flow->flows[slave_id] = rte_flow_create(slave_port_id, + flow->flows[member_id] = rte_flow_create(member_port_id, flow->rule.attr, flow->rule.pattern, flow->rule.actions, &ferror); - if (flow->flows[slave_id] == NULL) { - RTE_BOND_LOG(ERR, "Cannot create flow for slave" - " %d: %s", slave_id, + if (flow->flows[member_id] == NULL) { + RTE_BOND_LOG(ERR, "Cannot create flow for member" + " %d: %s", member_id, ferror.message ? 
ferror.message : "(no stated reason)"); - /* Destroy successful bond flows from the slave */ + /* Destroy successful bond flows from the member */ TAILQ_FOREACH(flow, &internals->flow_list, next) { - if (flow->flows[slave_id] != NULL) { - rte_flow_destroy(slave_port_id, - flow->flows[slave_id], + if (flow->flows[member_id] != NULL) { + rte_flow_destroy(member_port_id, + flow->flows[member_id], &ferror); - flow->flows[slave_id] = NULL; + flow->flows[member_id] = NULL; } } return -1; @@ -284,7 +286,7 @@ slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals) } static void -eth_bond_slave_inherit_dev_info_rx_first(struct bond_dev_private *internals, +eth_bond_member_inherit_dev_info_rx_first(struct bond_dev_private *internals, const struct rte_eth_dev_info *di) { struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf; @@ -292,20 +294,20 @@ eth_bond_slave_inherit_dev_info_rx_first(struct bond_dev_private *internals, internals->reta_size = di->reta_size; internals->rss_key_len = di->hash_key_size; - /* Inherit Rx offload capabilities from the first slave device */ + /* Inherit Rx offload capabilities from the first member device */ internals->rx_offload_capa = di->rx_offload_capa; internals->rx_queue_offload_capa = di->rx_queue_offload_capa; internals->flow_type_rss_offloads = di->flow_type_rss_offloads; - /* Inherit maximum Rx packet size from the first slave device */ + /* Inherit maximum Rx packet size from the first member device */ internals->candidate_max_rx_pktlen = di->max_rx_pktlen; - /* Inherit default Rx queue settings from the first slave device */ + /* Inherit default Rx queue settings from the first member device */ memcpy(rxconf_i, &di->default_rxconf, sizeof(*rxconf_i)); /* * Turn off descriptor prefetch and writeback by default for all - * slave devices. Applications may tweak this setting if need be. + * member devices. Applications may tweak this setting if need be. */ rxconf_i->rx_thresh.pthresh = 0; rxconf_i->rx_thresh.hthresh = 0; @@ -314,26 +316,26 @@ eth_bond_slave_inherit_dev_info_rx_first(struct bond_dev_private *internals, /* Setting this to zero should effectively enable default values */ rxconf_i->rx_free_thresh = 0; - /* Disable deferred start by default for all slave devices */ + /* Disable deferred start by default for all member devices */ rxconf_i->rx_deferred_start = 0; } static void -eth_bond_slave_inherit_dev_info_tx_first(struct bond_dev_private *internals, +eth_bond_member_inherit_dev_info_tx_first(struct bond_dev_private *internals, const struct rte_eth_dev_info *di) { struct rte_eth_txconf *txconf_i = &internals->default_txconf; - /* Inherit Tx offload capabilities from the first slave device */ + /* Inherit Tx offload capabilities from the first member device */ internals->tx_offload_capa = di->tx_offload_capa; internals->tx_queue_offload_capa = di->tx_queue_offload_capa; - /* Inherit default Tx queue settings from the first slave device */ + /* Inherit default Tx queue settings from the first member device */ memcpy(txconf_i, &di->default_txconf, sizeof(*txconf_i)); /* * Turn off descriptor prefetch and writeback by default for all - * slave devices. Applications may tweak this setting if need be. + * member devices. Applications may tweak this setting if need be. 
*/ txconf_i->tx_thresh.pthresh = 0; txconf_i->tx_thresh.hthresh = 0; @@ -341,17 +343,17 @@ eth_bond_slave_inherit_dev_info_tx_first(struct bond_dev_private *internals, /* * Setting these parameters to zero assumes that default - * values will be configured implicitly by slave devices. + * values will be configured implicitly by member devices. */ txconf_i->tx_free_thresh = 0; txconf_i->tx_rs_thresh = 0; - /* Disable deferred start by default for all slave devices */ + /* Disable deferred start by default for all member devices */ txconf_i->tx_deferred_start = 0; } static void -eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals, +eth_bond_member_inherit_dev_info_rx_next(struct bond_dev_private *internals, const struct rte_eth_dev_info *di) { struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf; @@ -362,32 +364,32 @@ eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals, internals->flow_type_rss_offloads &= di->flow_type_rss_offloads; /* - * If at least one slave device suggests enabling this - * setting by default, enable it for all slave devices + * If at least one member device suggests enabling this + * setting by default, enable it for all member devices * since disabling it may not be necessarily supported. */ if (rxconf->rx_drop_en == 1) rxconf_i->rx_drop_en = 1; /* - * Adding a new slave device may cause some of previously inherited + * Adding a new member device may cause some of previously inherited * offloads to be withdrawn from the internal rx_queue_offload_capa * value. Thus, the new internal value of default Rx queue offloads * has to be masked by rx_queue_offload_capa to make sure that only * commonly supported offloads are preserved from both the previous - * value and the value being inherited from the new slave device. + * value and the value being inherited from the new member device. */ rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) & internals->rx_queue_offload_capa; /* - * RETA size is GCD of all slaves RETA sizes, so, if all sizes will be + * RETA size is GCD of all members RETA sizes, so, if all sizes will be * the power of 2, the lower one is GCD */ if (internals->reta_size > di->reta_size) internals->reta_size = di->reta_size; if (internals->rss_key_len > di->hash_key_size) { - RTE_BOND_LOG(WARNING, "slave has different rss key size, " + RTE_BOND_LOG(WARNING, "member has different rss key size, " "configuring rss may fail"); internals->rss_key_len = di->hash_key_size; } @@ -398,7 +400,7 @@ eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals, } static void -eth_bond_slave_inherit_dev_info_tx_next(struct bond_dev_private *internals, +eth_bond_member_inherit_dev_info_tx_next(struct bond_dev_private *internals, const struct rte_eth_dev_info *di) { struct rte_eth_txconf *txconf_i = &internals->default_txconf; @@ -408,34 +410,34 @@ eth_bond_slave_inherit_dev_info_tx_next(struct bond_dev_private *internals, internals->tx_queue_offload_capa &= di->tx_queue_offload_capa; /* - * Adding a new slave device may cause some of previously inherited + * Adding a new member device may cause some of previously inherited * offloads to be withdrawn from the internal tx_queue_offload_capa * value. Thus, the new internal value of default Tx queue offloads * has to be masked by tx_queue_offload_capa to make sure that only * commonly supported offloads are preserved from both the previous - * value and the value being inherited from the new slave device. 
+ * value and the value being inherited from the new member device. */ txconf_i->offloads = (txconf_i->offloads | txconf->offloads) & internals->tx_queue_offload_capa; } static void -eth_bond_slave_inherit_desc_lim_first(struct rte_eth_desc_lim *bond_desc_lim, - const struct rte_eth_desc_lim *slave_desc_lim) +eth_bond_member_inherit_desc_lim_first(struct rte_eth_desc_lim *bond_desc_lim, + const struct rte_eth_desc_lim *member_desc_lim) { - memcpy(bond_desc_lim, slave_desc_lim, sizeof(*bond_desc_lim)); + memcpy(bond_desc_lim, member_desc_lim, sizeof(*bond_desc_lim)); } static int -eth_bond_slave_inherit_desc_lim_next(struct rte_eth_desc_lim *bond_desc_lim, - const struct rte_eth_desc_lim *slave_desc_lim) +eth_bond_member_inherit_desc_lim_next(struct rte_eth_desc_lim *bond_desc_lim, + const struct rte_eth_desc_lim *member_desc_lim) { bond_desc_lim->nb_max = RTE_MIN(bond_desc_lim->nb_max, - slave_desc_lim->nb_max); + member_desc_lim->nb_max); bond_desc_lim->nb_min = RTE_MAX(bond_desc_lim->nb_min, - slave_desc_lim->nb_min); + member_desc_lim->nb_min); bond_desc_lim->nb_align = RTE_MAX(bond_desc_lim->nb_align, - slave_desc_lim->nb_align); + member_desc_lim->nb_align); if (bond_desc_lim->nb_min > bond_desc_lim->nb_max || bond_desc_lim->nb_align > bond_desc_lim->nb_max) { @@ -444,102 +446,104 @@ eth_bond_slave_inherit_desc_lim_next(struct rte_eth_desc_lim *bond_desc_lim, } /* Treat maximum number of segments equal to 0 as unspecified */ - if (slave_desc_lim->nb_seg_max != 0 && + if (member_desc_lim->nb_seg_max != 0 && (bond_desc_lim->nb_seg_max == 0 || - slave_desc_lim->nb_seg_max < bond_desc_lim->nb_seg_max)) - bond_desc_lim->nb_seg_max = slave_desc_lim->nb_seg_max; - if (slave_desc_lim->nb_mtu_seg_max != 0 && + member_desc_lim->nb_seg_max < bond_desc_lim->nb_seg_max)) + bond_desc_lim->nb_seg_max = member_desc_lim->nb_seg_max; + if (member_desc_lim->nb_mtu_seg_max != 0 && (bond_desc_lim->nb_mtu_seg_max == 0 || - slave_desc_lim->nb_mtu_seg_max < bond_desc_lim->nb_mtu_seg_max)) - bond_desc_lim->nb_mtu_seg_max = slave_desc_lim->nb_mtu_seg_max; + member_desc_lim->nb_mtu_seg_max < bond_desc_lim->nb_mtu_seg_max)) + bond_desc_lim->nb_mtu_seg_max = member_desc_lim->nb_mtu_seg_max; return 0; } static int -__eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) +__eth_bond_member_add_lock_free(uint16_t bonding_port_id, uint16_t member_port_id) { - struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev; + struct rte_eth_dev *bonding_eth_dev, *member_eth_dev; struct bond_dev_private *internals; struct rte_eth_link link_props; struct rte_eth_dev_info dev_info; int ret; - bonded_eth_dev = &rte_eth_devices[bonded_port_id]; - internals = bonded_eth_dev->data->dev_private; + bonding_eth_dev = &rte_eth_devices[bonding_port_id]; + internals = bonding_eth_dev->data->dev_private; - if (valid_slave_port_id(internals, slave_port_id) != 0) + if (valid_member_port_id(internals, member_port_id) != 0) return -1; - slave_eth_dev = &rte_eth_devices[slave_port_id]; - if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDING_MEMBER) { - RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device"); + member_eth_dev = &rte_eth_devices[member_port_id]; + if (member_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDING_MEMBER) { + RTE_BOND_LOG(ERR, "Member device is already a member of a bonding device"); return -1; } - ret = rte_eth_dev_info_get(slave_port_id, &dev_info); + ret = rte_eth_dev_info_get(member_port_id, &dev_info); if (ret != 0) { RTE_BOND_LOG(ERR, "%s: Error during getting device (port %u) 
info: %s\n", - __func__, slave_port_id, strerror(-ret)); + __func__, member_port_id, strerror(-ret)); return ret; } if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) { - RTE_BOND_LOG(ERR, "Slave (port %u) max_rx_pktlen too small", - slave_port_id); + RTE_BOND_LOG(ERR, "Member (port %u) max_rx_pktlen too small", + member_port_id); return -1; } - slave_add(internals, slave_eth_dev); + member_add(internals, member_eth_dev); - /* We need to store slaves reta_size to be able to synchronize RETA for all - * slave devices even if its sizes are different. + /* We need to store members reta_size to be able to synchronize RETA for all + * member devices even if their sizes are different. */ - internals->slaves[internals->slave_count].reta_size = dev_info.reta_size; + internals->members[internals->member_count].reta_size = dev_info.reta_size; - if (internals->slave_count < 1) { - /* if MAC is not user defined then use MAC of first slave add to - * bonded device */ + if (internals->member_count < 1) { + /* + * if MAC is not user defined then use MAC of first member added to + * bonding device. + */ if (!internals->user_defined_mac) { - if (mac_address_set(bonded_eth_dev, - slave_eth_dev->data->mac_addrs)) { + if (mac_address_set(bonding_eth_dev, + member_eth_dev->data->mac_addrs)) { RTE_BOND_LOG(ERR, "Failed to set MAC address"); return -1; } } - /* Make primary slave */ - internals->primary_port = slave_port_id; - internals->current_primary_port = slave_port_id; + /* Make primary member */ + internals->primary_port = member_port_id; + internals->current_primary_port = member_port_id; internals->speed_capa = dev_info.speed_capa; - /* Inherit queues settings from first slave */ - internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues; - internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues; + /* Inherit queue settings from first member */ + internals->nb_rx_queues = member_eth_dev->data->nb_rx_queues; + internals->nb_tx_queues = member_eth_dev->data->nb_tx_queues; - eth_bond_slave_inherit_dev_info_rx_first(internals, &dev_info); - eth_bond_slave_inherit_dev_info_tx_first(internals, &dev_info); + eth_bond_member_inherit_dev_info_rx_first(internals, &dev_info); + eth_bond_member_inherit_dev_info_tx_first(internals, &dev_info); - eth_bond_slave_inherit_desc_lim_first(&internals->rx_desc_lim, + eth_bond_member_inherit_desc_lim_first(&internals->rx_desc_lim, &dev_info.rx_desc_lim); - eth_bond_slave_inherit_desc_lim_first(&internals->tx_desc_lim, + eth_bond_member_inherit_desc_lim_first(&internals->tx_desc_lim, &dev_info.tx_desc_lim); } else { int ret; internals->speed_capa &= dev_info.speed_capa; - eth_bond_slave_inherit_dev_info_rx_next(internals, &dev_info); - eth_bond_slave_inherit_dev_info_tx_next(internals, &dev_info); + eth_bond_member_inherit_dev_info_rx_next(internals, &dev_info); + eth_bond_member_inherit_dev_info_tx_next(internals, &dev_info); - ret = eth_bond_slave_inherit_desc_lim_next( - &internals->rx_desc_lim, &dev_info.rx_desc_lim); + ret = eth_bond_member_inherit_desc_lim_next(&internals->rx_desc_lim, + &dev_info.rx_desc_lim); if (ret != 0) return ret; - ret = eth_bond_slave_inherit_desc_lim_next( - &internals->tx_desc_lim, &dev_info.tx_desc_lim); + ret = eth_bond_member_inherit_desc_lim_next(&internals->tx_desc_lim, + &dev_info.tx_desc_lim); if (ret != 0) return ret; } @@ -549,100 +553,104 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) internals->mode == BONDING_MODE_BROADCAST) internals->tx_offload_capa &=
~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; - bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &= + bonding_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &= internals->flow_type_rss_offloads; - if (slave_rte_flow_prepare(internals->slave_count, internals) != 0) { - RTE_BOND_LOG(ERR, "Failed to prepare new slave flows: port=%d", - slave_port_id); + if (member_rte_flow_prepare(internals->member_count, internals) != 0) { + RTE_BOND_LOG(ERR, "Failed to prepare new member flows: port=%d", + member_port_id); return -1; } - /* Add additional MAC addresses to the slave */ - if (slave_add_mac_addresses(bonded_eth_dev, slave_port_id) != 0) { - RTE_BOND_LOG(ERR, "Failed to add mac address(es) to slave %hu", - slave_port_id); + /* Add additional MAC addresses to the member */ + if (member_add_mac_addresses(bonding_eth_dev, member_port_id) != 0) { + RTE_BOND_LOG(ERR, "Failed to add mac address(es) to member %hu", + member_port_id); return -1; } - internals->slave_count++; + internals->member_count++; - if (bonded_eth_dev->data->dev_started) { - if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) { - internals->slave_count--; - RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d", - slave_port_id); + if (bonding_eth_dev->data->dev_started) { + if (member_configure(bonding_eth_dev, member_eth_dev) != 0) { + internals->member_count--; + RTE_BOND_LOG(ERR, "rte_bond_members_configure: port=%d", + member_port_id); return -1; } - if (slave_start(bonded_eth_dev, slave_eth_dev) != 0) { - internals->slave_count--; - RTE_BOND_LOG(ERR, "rte_bond_slaves_start: port=%d", - slave_port_id); + if (member_start(bonding_eth_dev, member_eth_dev) != 0) { + internals->member_count--; + RTE_BOND_LOG(ERR, "rte_bond_members_start: port=%d", + member_port_id); return -1; } } - /* Update all slave devices MACs */ - mac_address_slaves_update(bonded_eth_dev); + /* Update all member devices MACs */ + mac_address_members_update(bonding_eth_dev); - /* Register link status change callback with bonded device pointer as - * argument*/ - rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC, - bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id); + /* + * Register link status change callback with bonding device pointer as + * argument. + */ + rte_eth_dev_callback_register(member_port_id, RTE_ETH_EVENT_INTR_LSC, + bond_ethdev_lsc_event_callback, &bonding_eth_dev->data->port_id); - /* If bonded device is started then we can add the slave to our active - * slave array */ - if (bonded_eth_dev->data->dev_started) { - ret = rte_eth_link_get_nowait(slave_port_id, &link_props); + /* + * If bonding device is started then we can add the member to our active + * member array. 
+ */ + if (bonding_eth_dev->data->dev_started) { + ret = rte_eth_link_get_nowait(member_port_id, &link_props); if (ret < 0) { - rte_eth_dev_callback_unregister(slave_port_id, + rte_eth_dev_callback_unregister(member_port_id, RTE_ETH_EVENT_INTR_LSC, bond_ethdev_lsc_event_callback, - &bonded_eth_dev->data->port_id); - internals->slave_count--; + &bonding_eth_dev->data->port_id); + internals->member_count--; RTE_BOND_LOG(ERR, - "Slave (port %u) link get failed: %s\n", - slave_port_id, rte_strerror(-ret)); + "Member (port %u) link get failed: %s\n", + member_port_id, rte_strerror(-ret)); return -1; } if (link_props.link_status == RTE_ETH_LINK_UP) { - if (internals->active_slave_count == 0 && + if (internals->active_member_count == 0 && !internals->user_defined_primary_port) bond_ethdev_primary_set(internals, - slave_port_id); + member_port_id); } } - /* Add slave details to bonded device */ - slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDING_MEMBER; + /* Add member details to bonding device */ + member_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDING_MEMBER; - slave_vlan_filter_set(bonded_port_id, slave_port_id); + member_vlan_filter_set(bonding_port_id, member_port_id); return 0; } int -rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id) +rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t member_port_id) { - struct rte_eth_dev *bonded_eth_dev; + struct rte_eth_dev *bonding_eth_dev; struct bond_dev_private *internals; int retval; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - bonded_eth_dev = &rte_eth_devices[bonded_port_id]; - internals = bonded_eth_dev->data->dev_private; + bonding_eth_dev = &rte_eth_devices[bonding_port_id]; + internals = bonding_eth_dev->data->dev_private; - if (valid_slave_port_id(internals, slave_port_id) != 0) + if (valid_member_port_id(internals, member_port_id) != 0) return -1; rte_spinlock_lock(&internals->lock); - retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id); + retval = __eth_bond_member_add_lock_free(bonding_port_id, member_port_id); rte_spinlock_unlock(&internals->lock); @@ -650,103 +658,105 @@ rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id) } static int -__eth_bond_slave_remove_lock_free(uint16_t bonded_port_id, - uint16_t slave_port_id) +__eth_bond_member_remove_lock_free(uint16_t bonding_port_id, + uint16_t member_port_id) { - struct rte_eth_dev *bonded_eth_dev; + struct rte_eth_dev *bonding_eth_dev; struct bond_dev_private *internals; - struct rte_eth_dev *slave_eth_dev; + struct rte_eth_dev *member_eth_dev; struct rte_flow_error flow_error; struct rte_flow *flow; - int i, slave_idx; + int i, member_idx; - bonded_eth_dev = &rte_eth_devices[bonded_port_id]; - internals = bonded_eth_dev->data->dev_private; + bonding_eth_dev = &rte_eth_devices[bonding_port_id]; + internals = bonding_eth_dev->data->dev_private; - if (valid_slave_port_id(internals, slave_port_id) < 0) + if (valid_member_port_id(internals, member_port_id) < 0) return -1; - /* first remove from active slave list */ - slave_idx = find_slave_by_id(internals->active_slaves, - internals->active_slave_count, slave_port_id); + /* first remove from active member list */ + member_idx = find_member_by_id(internals->active_members, + internals->active_member_count, member_port_id); - if (slave_idx < internals->active_slave_count) - deactivate_slave(bonded_eth_dev, slave_port_id); + if (member_idx < internals->active_member_count) + deactivate_member(bonding_eth_dev, 
member_port_id); - slave_idx = -1; - /* now find in slave list */ - for (i = 0; i < internals->slave_count; i++) - if (internals->slaves[i].port_id == slave_port_id) { - slave_idx = i; + member_idx = -1; + /* now find in member list */ + for (i = 0; i < internals->member_count; i++) + if (internals->members[i].port_id == member_port_id) { + member_idx = i; break; } - if (slave_idx < 0) { - RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %u", - internals->slave_count); + if (member_idx < 0) { + RTE_BOND_LOG(ERR, "Could not find member in port list, member count %u", + internals->member_count); return -1; } - /* Un-register link status change callback with bonded device pointer as + /* Un-register link status change callback with bonding device pointer as * argument*/ - rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC, + rte_eth_dev_callback_unregister(member_port_id, RTE_ETH_EVENT_INTR_LSC, bond_ethdev_lsc_event_callback, - &rte_eth_devices[bonded_port_id].data->port_id); + &rte_eth_devices[bonding_port_id].data->port_id); - /* Restore original MAC address of slave device */ - rte_eth_dev_default_mac_addr_set(slave_port_id, - &(internals->slaves[slave_idx].persisted_mac_addr)); + /* Restore original MAC address of member device */ + rte_eth_dev_default_mac_addr_set(member_port_id, + &internals->members[member_idx].persisted_mac_addr); - /* remove additional MAC addresses from the slave */ - slave_remove_mac_addresses(bonded_eth_dev, slave_port_id); + /* remove additional MAC addresses from the member */ + member_remove_mac_addresses(bonding_eth_dev, member_port_id); /* - * Remove bond device flows from slave device. + * Remove bond device flows from member device. * Note: don't restore flow isolate mode. */ TAILQ_FOREACH(flow, &internals->flow_list, next) { - if (flow->flows[slave_idx] != NULL) { - rte_flow_destroy(slave_port_id, flow->flows[slave_idx], + if (flow->flows[member_idx] != NULL) { + rte_flow_destroy(member_port_id, flow->flows[member_idx], &flow_error); - flow->flows[slave_idx] = NULL; + flow->flows[member_idx] = NULL; } } /* Remove the dedicated queues flow */ if (internals->mode == BONDING_MODE_8023AD && internals->mode4.dedicated_queues.enabled == 1 && - internals->mode4.dedicated_queues.flow[slave_port_id] != NULL) { - rte_flow_destroy(slave_port_id, - internals->mode4.dedicated_queues.flow[slave_port_id], + internals->mode4.dedicated_queues.flow[member_port_id] != NULL) { + rte_flow_destroy(member_port_id, + internals->mode4.dedicated_queues.flow[member_port_id], &flow_error); - internals->mode4.dedicated_queues.flow[slave_port_id] = NULL; + internals->mode4.dedicated_queues.flow[member_port_id] = NULL; } - slave_eth_dev = &rte_eth_devices[slave_port_id]; - slave_remove(internals, slave_eth_dev); - slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDING_MEMBER); + member_eth_dev = &rte_eth_devices[member_port_id]; + member_remove(internals, member_eth_dev); + member_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDING_MEMBER); - /* first slave in the active list will be the primary by default, + /* first member in the active list will be the primary by default, * otherwise use first device in list */ - if (internals->current_primary_port == slave_port_id) { - if (internals->active_slave_count > 0) - internals->current_primary_port = internals->active_slaves[0]; - else if (internals->slave_count > 0) - internals->current_primary_port = internals->slaves[0].port_id; + if (internals->current_primary_port == member_port_id) { + if 
(internals->active_member_count > 0) + internals->current_primary_port = internals->active_members[0]; + else if (internals->member_count > 0) + internals->current_primary_port = internals->members[0].port_id; else internals->primary_port = 0; - mac_address_slaves_update(bonded_eth_dev); + mac_address_members_update(bonding_eth_dev); } - if (internals->active_slave_count < 1) { - /* if no slaves are any longer attached to bonded device and MAC is not - * user defined then clear MAC of bonded device as it will be reset - * when a new slave is added */ - if (internals->slave_count < 1 && !internals->user_defined_mac) - memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0, - sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs))); + if (internals->active_member_count < 1) { + /* + * if no members are any longer attached to bonding device and MAC is not + * user defined then clear MAC of bonding device as it will be reset + * when a new member is added. + */ + if (internals->member_count < 1 && !internals->user_defined_mac) + memset(rte_eth_devices[bonding_port_id].data->mac_addrs, 0, + sizeof(*rte_eth_devices[bonding_port_id].data->mac_addrs)); } - if (internals->slave_count == 0) { + if (internals->member_count == 0) { internals->rx_offload_capa = 0; internals->tx_offload_capa = 0; internals->rx_queue_offload_capa = 0; @@ -760,21 +770,21 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id, } int -rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id) +rte_eth_bond_member_remove(uint16_t bonding_port_id, uint16_t member_port_id) { - struct rte_eth_dev *bonded_eth_dev; + struct rte_eth_dev *bonding_eth_dev; struct bond_dev_private *internals; int retval; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - bonded_eth_dev = &rte_eth_devices[bonded_port_id]; - internals = bonded_eth_dev->data->dev_private; + bonding_eth_dev = &rte_eth_devices[bonding_port_id]; + internals = bonding_eth_dev->data->dev_private; rte_spinlock_lock(&internals->lock); - retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id); + retval = __eth_bond_member_remove_lock_free(bonding_port_id, member_port_id); rte_spinlock_unlock(&internals->lock); @@ -782,195 +792,195 @@ rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id) } int -rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode) +rte_eth_bond_mode_set(uint16_t bonding_port_id, uint8_t mode) { - struct rte_eth_dev *bonded_eth_dev; + struct rte_eth_dev *bonding_eth_dev; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + bonding_eth_dev = &rte_eth_devices[bonding_port_id]; - if (check_for_master_bonded_ethdev(bonded_eth_dev) != 0 && + if (check_for_main_bonding_ethdev(bonding_eth_dev) != 0 && mode == BONDING_MODE_8023AD) return -1; - return bond_ethdev_mode_set(bonded_eth_dev, mode); + return bond_ethdev_mode_set(bonding_eth_dev, mode); } int -rte_eth_bond_mode_get(uint16_t bonded_port_id) +rte_eth_bond_mode_get(uint16_t bonding_port_id) { struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals = rte_eth_devices[bonding_port_id].data->dev_private; return internals->mode; } int -rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id) 
+rte_eth_bond_primary_set(uint16_t bonding_port_id, uint16_t member_port_id) { struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals = rte_eth_devices[bonding_port_id].data->dev_private; - if (valid_slave_port_id(internals, slave_port_id) != 0) + if (valid_member_port_id(internals, member_port_id) != 0) return -1; internals->user_defined_primary_port = 1; - internals->primary_port = slave_port_id; + internals->primary_port = member_port_id; - bond_ethdev_primary_set(internals, slave_port_id); + bond_ethdev_primary_set(internals, member_port_id); return 0; } int -rte_eth_bond_primary_get(uint16_t bonded_port_id) +rte_eth_bond_primary_get(uint16_t bonding_port_id) { struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals = rte_eth_devices[bonding_port_id].data->dev_private; - if (internals->slave_count < 1) + if (internals->member_count < 1) return -1; return internals->current_primary_port; } int -rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[], +rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t members[], uint16_t len) { struct bond_dev_private *internals; uint16_t i; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - if (slaves == NULL) + if (members == NULL) return -1; - internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals = rte_eth_devices[bonding_port_id].data->dev_private; - if (internals->slave_count > len) + if (internals->member_count > len) return -1; - for (i = 0; i < internals->slave_count; i++) - slaves[i] = internals->slaves[i].port_id; + for (i = 0; i < internals->member_count; i++) + members[i] = internals->members[i].port_id; - return internals->slave_count; + return internals->member_count; } int -rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[], +rte_eth_bond_active_members_get(uint16_t bonding_port_id, uint16_t members[], uint16_t len) { struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - if (slaves == NULL) + if (members == NULL) return -1; - internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals = rte_eth_devices[bonding_port_id].data->dev_private; - if (internals->active_slave_count > len) + if (internals->active_member_count > len) return -1; - memcpy(slaves, internals->active_slaves, - internals->active_slave_count * sizeof(internals->active_slaves[0])); + memcpy(members, internals->active_members, + internals->active_member_count * sizeof(internals->active_members[0])); - return internals->active_slave_count; + return internals->active_member_count; } int -rte_eth_bond_mac_address_set(uint16_t bonded_port_id, +rte_eth_bond_mac_address_set(uint16_t bonding_port_id, struct rte_ether_addr *mac_addr) { - struct rte_eth_dev *bonded_eth_dev; + struct rte_eth_dev *bonding_eth_dev; struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - bonded_eth_dev = &rte_eth_devices[bonded_port_id]; - internals = bonded_eth_dev->data->dev_private; + bonding_eth_dev = &rte_eth_devices[bonding_port_id]; + internals 
= bonding_eth_dev->data->dev_private; - /* Set MAC Address of Bonded Device */ - if (mac_address_set(bonded_eth_dev, mac_addr)) + /* Set MAC Address of Bonding Device */ + if (mac_address_set(bonding_eth_dev, mac_addr)) return -1; internals->user_defined_mac = 1; - /* Update all slave devices MACs*/ - if (internals->slave_count > 0) - return mac_address_slaves_update(bonded_eth_dev); + /* Update all member devices MACs*/ + if (internals->member_count > 0) + return mac_address_members_update(bonding_eth_dev); return 0; } int -rte_eth_bond_mac_address_reset(uint16_t bonded_port_id) +rte_eth_bond_mac_address_reset(uint16_t bonding_port_id) { - struct rte_eth_dev *bonded_eth_dev; + struct rte_eth_dev *bonding_eth_dev; struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - bonded_eth_dev = &rte_eth_devices[bonded_port_id]; - internals = bonded_eth_dev->data->dev_private; + bonding_eth_dev = &rte_eth_devices[bonding_port_id]; + internals = bonding_eth_dev->data->dev_private; internals->user_defined_mac = 0; - if (internals->slave_count > 0) { - int slave_port; - /* Get the primary slave location based on the primary port - * number as, while slave_add(), we will keep the primary - * slave based on slave_count,but not based on the primary port. + if (internals->member_count > 0) { + int member_port; + /* Get the primary member location based on the primary port + * number as, while member_add(), we will keep the primary + * member based on member_count,but not based on the primary port. */ - for (slave_port = 0; slave_port < internals->slave_count; - slave_port++) { - if (internals->slaves[slave_port].port_id == + for (member_port = 0; member_port < internals->member_count; + member_port++) { + if (internals->members[member_port].port_id == internals->primary_port) break; } - /* Set MAC Address of Bonded Device */ - if (mac_address_set(bonded_eth_dev, - &internals->slaves[slave_port].persisted_mac_addr) + /* Set MAC Address of Bonding Device */ + if (mac_address_set(bonding_eth_dev, + &internals->members[member_port].persisted_mac_addr) != 0) { - RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device"); + RTE_BOND_LOG(ERR, "Failed to set MAC address on bonding device"); return -1; } - /* Update all slave devices MAC addresses */ - return mac_address_slaves_update(bonded_eth_dev); + /* Update all member devices MAC addresses */ + return mac_address_members_update(bonding_eth_dev); } - /* No need to update anything as no slaves present */ + /* No need to update anything as no members present */ return 0; } int -rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy) +rte_eth_bond_xmit_policy_set(uint16_t bonding_port_id, uint8_t policy) { struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals = rte_eth_devices[bonding_port_id].data->dev_private; switch (policy) { case BALANCE_XMIT_POLICY_LAYER2: @@ -993,98 +1003,98 @@ rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy) } int -rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id) +rte_eth_bond_xmit_policy_get(uint16_t bonding_port_id) { struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - internals = rte_eth_devices[bonded_port_id].data->dev_private; + 
internals = rte_eth_devices[bonding_port_id].data->dev_private; return internals->balance_xmit_policy; } int -rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms) +rte_eth_bond_link_monitoring_set(uint16_t bonding_port_id, uint32_t internal_ms) { struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals = rte_eth_devices[bonding_port_id].data->dev_private; internals->link_status_polling_interval_ms = internal_ms; return 0; } int -rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id) +rte_eth_bond_link_monitoring_get(uint16_t bonding_port_id) { struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals = rte_eth_devices[bonding_port_id].data->dev_private; return internals->link_status_polling_interval_ms; } int -rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id, +rte_eth_bond_link_down_prop_delay_set(uint16_t bonding_port_id, uint32_t delay_ms) { struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals = rte_eth_devices[bonding_port_id].data->dev_private; internals->link_down_delay_ms = delay_ms; return 0; } int -rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id) +rte_eth_bond_link_down_prop_delay_get(uint16_t bonding_port_id) { struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals = rte_eth_devices[bonding_port_id].data->dev_private; return internals->link_down_delay_ms; } int -rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, uint32_t delay_ms) +rte_eth_bond_link_up_prop_delay_set(uint16_t bonding_port_id, uint32_t delay_ms) { struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals = rte_eth_devices[bonding_port_id].data->dev_private; internals->link_up_delay_ms = delay_ms; return 0; } int -rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id) +rte_eth_bond_link_up_prop_delay_get(uint16_t bonding_port_id) { struct bond_dev_private *internals; - if (valid_bonded_port_id(bonded_port_id) != 0) + if (valid_bonding_port_id(bonding_port_id) != 0) return -1; - internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals = rte_eth_devices[bonding_port_id].data->dev_private; return internals->link_up_delay_ms; } diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c index c137efd55f7..bdec5d61d4a 100644 --- a/drivers/net/bonding/rte_eth_bond_args.c +++ b/drivers/net/bonding/rte_eth_bond_args.c @@ -12,8 +12,8 @@ #include "eth_bond_private.h" const char *pmd_bond_init_valid_arguments[] = { - PMD_BOND_SLAVE_PORT_KVARG, - PMD_BOND_PRIMARY_SLAVE_KVARG, + PMD_BOND_MEMBER_PORT_KVARG, + PMD_BOND_PRIMARY_MEMBER_KVARG, PMD_BOND_MODE_KVARG, PMD_BOND_XMIT_POLICY_KVARG, PMD_BOND_SOCKET_ID_KVARG, @@ -109,31 +109,31 @@ parse_port_id(const char *port_str) } int 
-bond_ethdev_parse_slave_port_kvarg(const char *key, +bond_ethdev_parse_member_port_kvarg(const char *key, const char *value, void *extra_args) { - struct bond_ethdev_slave_ports *slave_ports; + struct bond_ethdev_member_ports *member_ports; if (value == NULL || extra_args == NULL) return -1; - slave_ports = extra_args; + member_ports = extra_args; - if (strcmp(key, PMD_BOND_SLAVE_PORT_KVARG) == 0) { + if (strcmp(key, PMD_BOND_MEMBER_PORT_KVARG) == 0) { int port_id = parse_port_id(value); if (port_id < 0) { - RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified", + RTE_BOND_LOG(ERR, "Invalid member port value (%s) specified", value); return -1; } else - slave_ports->slaves[slave_ports->slave_count++] = + member_ports->members[member_ports->member_count++] = port_id; } return 0; } int -bond_ethdev_parse_slave_mode_kvarg(const char *key __rte_unused, +bond_ethdev_parse_member_mode_kvarg(const char *key __rte_unused, const char *value, void *extra_args) { uint8_t *mode; @@ -160,13 +160,13 @@ bond_ethdev_parse_slave_mode_kvarg(const char *key __rte_unused, case BONDING_MODE_ALB: return 0; default: - RTE_BOND_LOG(ERR, "Invalid slave mode value (%s) specified", value); + RTE_BOND_LOG(ERR, "Invalid member mode value (%s) specified", value); return -1; } } int -bond_ethdev_parse_slave_agg_mode_kvarg(const char *key __rte_unused, +bond_ethdev_parse_member_agg_mode_kvarg(const char *key __rte_unused, const char *value, void *extra_args) { uint8_t *agg_mode; @@ -227,19 +227,19 @@ bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused, } int -bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused, +bond_ethdev_parse_primary_member_port_id_kvarg(const char *key __rte_unused, const char *value, void *extra_args) { - int primary_slave_port_id; + int primary_member_port_id; if (value == NULL || extra_args == NULL) return -1; - primary_slave_port_id = parse_port_id(value); - if (primary_slave_port_id < 0) + primary_member_port_id = parse_port_id(value); + if (primary_member_port_id < 0) return -1; - *(uint16_t *)extra_args = (uint16_t)primary_slave_port_id; + *(uint16_t *)extra_args = (uint16_t)primary_member_port_id; return 0; } diff --git a/drivers/net/bonding/rte_eth_bond_flow.c b/drivers/net/bonding/rte_eth_bond_flow.c index 65b77faae70..71a91675f79 100644 --- a/drivers/net/bonding/rte_eth_bond_flow.c +++ b/drivers/net/bonding/rte_eth_bond_flow.c @@ -69,12 +69,12 @@ bond_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, int i; int ret; - for (i = 0; i < internals->slave_count; i++) { - ret = rte_flow_validate(internals->slaves[i].port_id, attr, + for (i = 0; i < internals->member_count; i++) { + ret = rte_flow_validate(internals->members[i].port_id, attr, patterns, actions, err); if (ret) { RTE_BOND_LOG(ERR, "Operation rte_flow_validate failed" - " for slave %d with error %d", i, ret); + " for member %d with error %d", i, ret); return ret; } } @@ -97,11 +97,11 @@ bond_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, NULL, rte_strerror(ENOMEM)); return NULL; } - for (i = 0; i < internals->slave_count; i++) { - flow->flows[i] = rte_flow_create(internals->slaves[i].port_id, + for (i = 0; i < internals->member_count; i++) { + flow->flows[i] = rte_flow_create(internals->members[i].port_id, attr, patterns, actions, err); if (unlikely(flow->flows[i] == NULL)) { - RTE_BOND_LOG(ERR, "Failed to create flow on slave %d", + RTE_BOND_LOG(ERR, "Failed to create flow on member %d", i); goto err; } @@ -109,10 +109,10 @@ 
bond_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, TAILQ_INSERT_TAIL(&internals->flow_list, flow, next); return flow; err: - /* Destroy all slaves flows. */ - for (i = 0; i < internals->slave_count; i++) { + /* Destroy all members flows. */ + for (i = 0; i < internals->member_count; i++) { if (flow->flows[i] != NULL) - rte_flow_destroy(internals->slaves[i].port_id, + rte_flow_destroy(internals->members[i].port_id, flow->flows[i], err); } bond_flow_release(&flow); @@ -127,15 +127,15 @@ bond_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, int i; int ret = 0; - for (i = 0; i < internals->slave_count; i++) { + for (i = 0; i < internals->member_count; i++) { int lret; if (unlikely(flow->flows[i] == NULL)) continue; - lret = rte_flow_destroy(internals->slaves[i].port_id, + lret = rte_flow_destroy(internals->members[i].port_id, flow->flows[i], err); if (unlikely(lret != 0)) { - RTE_BOND_LOG(ERR, "Failed to destroy flow on slave %d:" + RTE_BOND_LOG(ERR, "Failed to destroy flow on member %d:" " %d", i, lret); ret = lret; } @@ -154,7 +154,7 @@ bond_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *err) int ret = 0; int lret; - /* Destroy all bond flows from its slaves instead of flushing them to + /* Destroy all bond flows from its members instead of flushing them to * keep the LACP flow or any other external flows. */ RTE_TAILQ_FOREACH_SAFE(flow, &internals->flow_list, next, tmp) { @@ -163,7 +163,7 @@ bond_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *err) ret = lret; } if (unlikely(ret != 0)) - RTE_BOND_LOG(ERR, "Failed to flush flow in all slaves"); + RTE_BOND_LOG(ERR, "Failed to flush flow in all members"); return ret; } @@ -174,26 +174,26 @@ bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *err) { struct bond_dev_private *internals = dev->data->dev_private; - struct rte_flow_query_count slave_count; + struct rte_flow_query_count member_count; int i; int ret; count->bytes = 0; count->hits = 0; - rte_memcpy(&slave_count, count, sizeof(slave_count)); - for (i = 0; i < internals->slave_count; i++) { - ret = rte_flow_query(internals->slaves[i].port_id, + rte_memcpy(&member_count, count, sizeof(member_count)); + for (i = 0; i < internals->member_count; i++) { + ret = rte_flow_query(internals->members[i].port_id, flow->flows[i], action, - &slave_count, err); + &member_count, err); if (unlikely(ret != 0)) { RTE_BOND_LOG(ERR, "Failed to query flow on" - " slave %d: %d", i, ret); + " member %d: %d", i, ret); return ret; } - count->bytes += slave_count.bytes; - count->hits += slave_count.hits; - slave_count.bytes = 0; - slave_count.hits = 0; + count->bytes += member_count.bytes; + count->hits += member_count.hits; + member_count.bytes = 0; + member_count.hits = 0; } return 0; } @@ -221,11 +221,11 @@ bond_flow_isolate(struct rte_eth_dev *dev, int set, int i; int ret; - for (i = 0; i < internals->slave_count; i++) { - ret = rte_flow_isolate(internals->slaves[i].port_id, set, err); + for (i = 0; i < internals->member_count; i++) { + ret = rte_flow_isolate(internals->members[i].port_id, set, err); if (unlikely(ret != 0)) { RTE_BOND_LOG(ERR, "Operation rte_flow_isolate failed" - " for slave %d with error %d", i, ret); + " for member %d with error %d", i, ret); internals->flow_isolated_valid = 0; return ret; } diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c index 73205f78f4e..c40d18d128b 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ 
b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -61,33 +61,35 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) struct bond_dev_private *internals; uint16_t num_rx_total = 0; - uint16_t slave_count; - uint16_t active_slave; + uint16_t member_count; + uint16_t active_member; int i; - /* Cast to structure, containing bonded device's port id and queue id */ + /* Cast to structure, containing bonding device's port id and queue id */ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue; internals = bd_rx_q->dev_private; - slave_count = internals->active_slave_count; - active_slave = bd_rx_q->active_slave; + member_count = internals->active_member_count; + active_member = bd_rx_q->active_member; - for (i = 0; i < slave_count && nb_pkts; i++) { - uint16_t num_rx_slave; + for (i = 0; i < member_count && nb_pkts; i++) { + uint16_t num_rx_member; - /* Offset of pointer to *bufs increases as packets are received - * from other slaves */ - num_rx_slave = - rte_eth_rx_burst(internals->active_slaves[active_slave], + /* + * Offset of pointer to *bufs increases as packets are received + * from other members. + */ + num_rx_member = + rte_eth_rx_burst(internals->active_members[active_member], bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts); - num_rx_total += num_rx_slave; - nb_pkts -= num_rx_slave; - if (++active_slave >= slave_count) - active_slave = 0; + num_rx_total += num_rx_member; + nb_pkts -= num_rx_member; + if (++active_member >= member_count) + active_member = 0; } - if (++bd_rx_q->active_slave >= slave_count) - bd_rx_q->active_slave = 0; + if (++bd_rx_q->active_member >= member_count) + bd_rx_q->active_member = 0; return num_rx_total; } @@ -97,7 +99,7 @@ bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs, { struct bond_dev_private *internals; - /* Cast to structure, containing bonded device's port id and queue id */ + /* Cast to structure, containing bonding device's port id and queue id */ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue; internals = bd_rx_q->dev_private; @@ -158,8 +160,8 @@ const struct rte_flow_attr flow_attr_8023ad = { int bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev, - uint16_t slave_port) { - struct rte_eth_dev_info slave_info; + uint16_t member_port) { + struct rte_eth_dev_info member_info; struct rte_flow_error error; struct bond_dev_private *internals = bond_dev->data->dev_private; @@ -177,29 +179,29 @@ bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev, } }; - int ret = rte_flow_validate(slave_port, &flow_attr_8023ad, + int ret = rte_flow_validate(member_port, &flow_attr_8023ad, flow_item_8023ad, actions, &error); if (ret < 0) { - RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)", - __func__, error.message, slave_port, + RTE_BOND_LOG(ERR, "%s: %s (member_port=%d queue_id=%d)", + __func__, error.message, member_port, internals->mode4.dedicated_queues.rx_qid); return -1; } - ret = rte_eth_dev_info_get(slave_port, &slave_info); + ret = rte_eth_dev_info_get(member_port, &member_info); if (ret != 0) { RTE_BOND_LOG(ERR, "%s: Error during getting device (port %u) info: %s\n", - __func__, slave_port, strerror(-ret)); + __func__, member_port, strerror(-ret)); return ret; } - if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues || - slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) { + if (member_info.max_rx_queues < bond_dev->data->nb_rx_queues || + member_info.max_tx_queues < bond_dev->data->nb_tx_queues) { RTE_BOND_LOG(ERR, - "%s: Slave %d capabilities doesn't allow 
allocating additional queues", - __func__, slave_port); + "%s: Member %d capabilities doesn't allow allocating additional queues", + __func__, member_port); return -1; } @@ -214,8 +216,8 @@ bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) { uint16_t idx; int ret; - /* Verify if all slaves in bonding supports flow director and */ - if (internals->slave_count > 0) { + /* Verify if all members in bonding supports flow director and */ + if (internals->member_count > 0) { ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info); if (ret != 0) { RTE_BOND_LOG(ERR, @@ -229,9 +231,9 @@ bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) { internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues; internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues; - for (idx = 0; idx < internals->slave_count; idx++) { + for (idx = 0; idx < internals->member_count; idx++) { if (bond_ethdev_8023ad_flow_verify(bond_dev, - internals->slaves[idx].port_id) != 0) + internals->members[idx].port_id) != 0) return -1; } } @@ -240,7 +242,7 @@ bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) { } int -bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) { +bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t member_port) { struct rte_flow_error error; struct bond_dev_private *internals = bond_dev->data->dev_private; @@ -258,12 +260,12 @@ bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) { } }; - internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port, + internals->mode4.dedicated_queues.flow[member_port] = rte_flow_create(member_port, &flow_attr_8023ad, flow_item_8023ad, actions, &error); - if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) { + if (internals->mode4.dedicated_queues.flow[member_port] == NULL) { RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s " - "(slave_port=%d queue_id=%d)", - error.message, slave_port, + "(member_port=%d queue_id=%d)", + error.message, member_port, internals->mode4.dedicated_queues.rx_qid); return -1; } @@ -293,21 +295,21 @@ static inline uint16_t rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts, bool dedicated_rxq) { - /* Cast to structure, containing bonded device's port id and queue id */ + /* Cast to structure, containing bonding device's port id and queue id */ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue; struct bond_dev_private *internals = bd_rx_q->dev_private; - struct rte_eth_dev *bonded_eth_dev = + struct rte_eth_dev *bonding_eth_dev = &rte_eth_devices[internals->port_id]; - struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs; + struct rte_ether_addr *bond_mac = bonding_eth_dev->data->mac_addrs; struct rte_ether_hdr *hdr; const uint16_t ether_type_slow_be = rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW); uint16_t num_rx_total = 0; /* Total number of received packets */ - uint16_t slaves[RTE_MAX_ETHPORTS]; - uint16_t slave_count, idx; + uint16_t members[RTE_MAX_ETHPORTS]; + uint16_t member_count, idx; - uint8_t collecting; /* current slave collecting status */ + uint8_t collecting; /* current member collecting status */ const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id); const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id); uint8_t subtype; @@ -315,24 +317,24 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts, uint16_t j; uint16_t k; - /* Copy slave list to protect against slave up/down changes during tx + /* Copy member 
list to protect against member up/down changes during tx * bursting */ - slave_count = internals->active_slave_count; - memcpy(slaves, internals->active_slaves, - sizeof(internals->active_slaves[0]) * slave_count); + member_count = internals->active_member_count; + memcpy(members, internals->active_members, + sizeof(internals->active_members[0]) * member_count); - idx = bd_rx_q->active_slave; - if (idx >= slave_count) { - bd_rx_q->active_slave = 0; + idx = bd_rx_q->active_member; + if (idx >= member_count) { + bd_rx_q->active_member = 0; idx = 0; } - for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) { + for (i = 0; i < member_count && num_rx_total < nb_pkts; i++) { j = num_rx_total; - collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]], + collecting = ACTOR_STATE(&bond_mode_8023ad_ports[members[idx]], COLLECTING); - /* Read packets from this slave */ - num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id, + /* Read packets from this member */ + num_rx_total += rte_eth_rx_burst(members[idx], bd_rx_q->queue_id, &bufs[num_rx_total], nb_pkts - num_rx_total); for (k = j; k < 2 && k < num_rx_total; k++) @@ -348,7 +350,7 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts, /* Remove packet from array if: * - it is slow packet but no dedicated rxq is present, - * - slave is not in collecting state, + * - member is not in collecting state, * - bonding interface is not in promiscuous mode and * packet address isn't in mac_addrs array: * - packet is unicast, @@ -367,7 +369,7 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts, !allmulti)))) { if (hdr->ether_type == ether_type_slow_be) { bond_mode_8023ad_handle_slow_pkt( - internals, slaves[idx], bufs[j]); + internals, members[idx], bufs[j]); } else rte_pktmbuf_free(bufs[j]); @@ -380,12 +382,12 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts, } else j++; } - if (unlikely(++idx == slave_count)) + if (unlikely(++idx == member_count)) idx = 0; } - if (++bd_rx_q->active_slave >= slave_count) - bd_rx_q->active_slave = 0; + if (++bd_rx_q->active_member >= member_count) + bd_rx_q->active_member = 0; return num_rx_total; } @@ -406,7 +408,7 @@ bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs, #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) uint32_t burstnumberRX; -uint32_t burstnumberTX; +uint32_t burst_number_TX; #ifdef RTE_LIBRTE_BOND_DEBUG_ALB @@ -583,59 +585,61 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs, struct bond_dev_private *internals; struct bond_tx_queue *bd_tx_q; - struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts]; - uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 }; + struct rte_mbuf *member_bufs[RTE_MAX_ETHPORTS][nb_pkts]; + uint16_t member_nb_pkts[RTE_MAX_ETHPORTS] = { 0 }; - uint16_t num_of_slaves; - uint16_t slaves[RTE_MAX_ETHPORTS]; + uint16_t num_of_members; + uint16_t members[RTE_MAX_ETHPORTS]; - uint16_t num_tx_total = 0, num_tx_slave; + uint16_t num_tx_total = 0, num_tx_member; - static int slave_idx = 0; - int i, cslave_idx = 0, tx_fail_total = 0; + static int member_idx; + int i, cmember_idx = 0, tx_fail_total = 0; bd_tx_q = (struct bond_tx_queue *)queue; internals = bd_tx_q->dev_private; - /* Copy slave list to protect against slave up/down changes during tx + /* Copy member list to protect against member up/down changes during tx * bursting */ - num_of_slaves = internals->active_slave_count; - memcpy(slaves, internals->active_slaves, - 
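/*
 * Illustrative sketch, not part of the patch: the mode-4 Rx path above keeps
 * IEEE 802.3ad "slow protocol" frames (EtherType 0x8809: LACPDUs and marker
 * PDUs) away from the application and feeds them to the bonding state machine
 * when no dedicated control queue is configured. The helper name is
 * hypothetical; the EtherType constant and mbuf accessors are real DPDK APIs.
 */
#include <stdbool.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

static bool
is_slow_protocol_frame(const struct rte_mbuf *m)
{
	const struct rte_ether_hdr *eh =
		rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);

	/* EtherType is carried in network byte order in the frame. */
	return eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW);
}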
sizeof(internals->active_slaves[0]) * num_of_slaves); + num_of_members = internals->active_member_count; + memcpy(members, internals->active_members, + sizeof(internals->active_members[0]) * num_of_members); - if (num_of_slaves < 1) + if (num_of_members < 1) return num_tx_total; - /* Populate slaves mbuf with which packets are to be sent on it */ + /* Populate members mbuf with which packets are to be sent on it */ for (i = 0; i < nb_pkts; i++) { - cslave_idx = (slave_idx + i) % num_of_slaves; - slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i]; + cmember_idx = (member_idx + i) % num_of_members; + member_bufs[cmember_idx][(member_nb_pkts[cmember_idx])++] = bufs[i]; } - /* increment current slave index so the next call to tx burst starts on the - * next slave */ - slave_idx = ++cslave_idx; + /* + * increment current member index so the next call to tx burst starts on the + * next member. + */ + member_idx = ++cmember_idx; - /* Send packet burst on each slave device */ - for (i = 0; i < num_of_slaves; i++) { - if (slave_nb_pkts[i] > 0) { - num_tx_slave = rte_eth_tx_prepare(slaves[i], - bd_tx_q->queue_id, slave_bufs[i], - slave_nb_pkts[i]); - num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, - slave_bufs[i], num_tx_slave); + /* Send packet burst on each member device */ + for (i = 0; i < num_of_members; i++) { + if (member_nb_pkts[i] > 0) { + num_tx_member = rte_eth_tx_prepare(members[i], + bd_tx_q->queue_id, member_bufs[i], + member_nb_pkts[i]); + num_tx_member = rte_eth_tx_burst(members[i], bd_tx_q->queue_id, + member_bufs[i], num_tx_member); /* if tx burst fails move packets to end of bufs */ - if (unlikely(num_tx_slave < slave_nb_pkts[i])) { - int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave; + if (unlikely(num_tx_member < member_nb_pkts[i])) { + int tx_fail_member = member_nb_pkts[i] - num_tx_member; - tx_fail_total += tx_fail_slave; + tx_fail_total += tx_fail_member; memcpy(&bufs[nb_pkts - tx_fail_total], - &slave_bufs[i][num_tx_slave], - tx_fail_slave * sizeof(bufs[0])); + &member_bufs[i][num_tx_member], + tx_fail_member * sizeof(bufs[0])); } - num_tx_total += num_tx_slave; + num_tx_total += num_tx_member; } } @@ -653,7 +657,7 @@ bond_ethdev_tx_burst_active_backup(void *queue, bd_tx_q = (struct bond_tx_queue *)queue; internals = bd_tx_q->dev_private; - if (internals->active_slave_count < 1) + if (internals->active_member_count < 1) return 0; nb_prep_pkts = rte_eth_tx_prepare(internals->current_primary_port, @@ -699,7 +703,7 @@ ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr) void burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts, - uint16_t slave_count, uint16_t *slaves) + uint16_t member_count, uint16_t *members) { struct rte_ether_hdr *eth_hdr; uint32_t hash; @@ -710,13 +714,13 @@ burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts, hash = ether_hash(eth_hdr); - slaves[i] = (hash ^= hash >> 8) % slave_count; + members[i] = (hash ^= hash >> 8) % member_count; } } void burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts, - uint16_t slave_count, uint16_t *slaves) + uint16_t member_count, uint16_t *members) { uint16_t i; struct rte_ether_hdr *eth_hdr; @@ -748,13 +752,13 @@ burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts, hash ^= hash >> 16; hash ^= hash >> 8; - slaves[i] = hash % slave_count; + members[i] = hash % member_count; } } void burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts, - uint16_t slave_count, uint16_t *slaves) + uint16_t member_count, uint16_t *members) { struct rte_ether_hdr *eth_hdr; uint16_t proto; @@ 
-822,30 +826,29 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts, hash ^= hash >> 16; hash ^= hash >> 8; - slaves[i] = hash % slave_count; + members[i] = hash % member_count; } } -struct bwg_slave { +struct bwg_member { uint64_t bwg_left_int; uint64_t bwg_left_remainder; - uint16_t slave; + uint16_t member; }; void -bond_tlb_activate_slave(struct bond_dev_private *internals) { +bond_tlb_activate_member(struct bond_dev_private *internals) { int i; - for (i = 0; i < internals->active_slave_count; i++) { - tlb_last_obytets[internals->active_slaves[i]] = 0; - } + for (i = 0; i < internals->active_member_count; i++) + tlb_last_obytets[internals->active_members[i]] = 0; } static int bandwidth_cmp(const void *a, const void *b) { - const struct bwg_slave *bwg_a = a; - const struct bwg_slave *bwg_b = b; + const struct bwg_member *bwg_a = a; + const struct bwg_member *bwg_b = b; int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int; int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder - (int64_t)bwg_a->bwg_left_remainder; @@ -863,14 +866,14 @@ bandwidth_cmp(const void *a, const void *b) static void bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx, - struct bwg_slave *bwg_slave) + struct bwg_member *bwg_member) { struct rte_eth_link link_status; int ret; ret = rte_eth_link_get_nowait(port_id, &link_status); if (ret < 0) { - RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s", + RTE_BOND_LOG(ERR, "Member (port %u) link get failed: %s", port_id, rte_strerror(-ret)); return; } @@ -878,51 +881,51 @@ bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx, if (link_bwg == 0) return; link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS; - bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg; - bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg; + bwg_member->bwg_left_int = (link_bwg - 1000 * load) / link_bwg; + bwg_member->bwg_left_remainder = (link_bwg - 1000 * load) % link_bwg; } static void -bond_ethdev_update_tlb_slave_cb(void *arg) +bond_ethdev_update_tlb_member_cb(void *arg) { struct bond_dev_private *internals = arg; - struct rte_eth_stats slave_stats; - struct bwg_slave bwg_array[RTE_MAX_ETHPORTS]; - uint16_t slave_count; + struct rte_eth_stats member_stats; + struct bwg_member bwg_array[RTE_MAX_ETHPORTS]; + uint16_t member_count; uint64_t tx_bytes; uint8_t update_stats = 0; - uint16_t slave_id; + uint16_t member_id; uint16_t i; - internals->slave_update_idx++; + internals->member_update_idx++; - if (internals->slave_update_idx >= REORDER_PERIOD_MS) + if (internals->member_update_idx >= REORDER_PERIOD_MS) update_stats = 1; - for (i = 0; i < internals->active_slave_count; i++) { - slave_id = internals->active_slaves[i]; - rte_eth_stats_get(slave_id, &slave_stats); - tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id]; - bandwidth_left(slave_id, tx_bytes, - internals->slave_update_idx, &bwg_array[i]); - bwg_array[i].slave = slave_id; + for (i = 0; i < internals->active_member_count; i++) { + member_id = internals->active_members[i]; + rte_eth_stats_get(member_id, &member_stats); + tx_bytes = member_stats.obytes - tlb_last_obytets[member_id]; + bandwidth_left(member_id, tx_bytes, + internals->member_update_idx, &bwg_array[i]); + bwg_array[i].member = member_id; if (update_stats) { - tlb_last_obytets[slave_id] = slave_stats.obytes; + tlb_last_obytets[member_id] = member_stats.obytes; } } if (update_stats == 1) - internals->slave_update_idx = 0; + internals->member_update_idx = 0; - slave_count = i; - 
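/*
 * Illustrative sketch, not part of the patch: a standalone example of how the
 * burst_xmit_*_hash policies above turn a per-packet header hash into a
 * member slot. The hash is folded and reduced modulo the active member count,
 * so all packets of one flow keep hitting the same member port. The hash
 * value below is made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hash = 0x1b3a94c7;	/* pretend L3/L4 header hash */
	uint16_t member_count = 3;	/* active members in the bond */

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	printf("packet mapped to member slot %u of %u\n",
	       hash % member_count, member_count);
	return 0;
}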
qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp); - for (i = 0; i < slave_count; i++) - internals->tlb_slaves_order[i] = bwg_array[i].slave; + member_count = i; + qsort(bwg_array, member_count, sizeof(bwg_array[0]), bandwidth_cmp); + for (i = 0; i < member_count; i++) + internals->tlb_members_order[i] = bwg_array[i].member; - rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb, + rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_member_cb, (struct bond_dev_private *)internals); } @@ -937,29 +940,29 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) uint16_t num_tx_total = 0, num_tx_prep; uint16_t i, j; - uint16_t num_of_slaves = internals->active_slave_count; - uint16_t slaves[RTE_MAX_ETHPORTS]; + uint16_t num_of_members = internals->active_member_count; + uint16_t members[RTE_MAX_ETHPORTS]; struct rte_ether_hdr *ether_hdr; - struct rte_ether_addr primary_slave_addr; - struct rte_ether_addr active_slave_addr; + struct rte_ether_addr primary_member_addr; + struct rte_ether_addr active_member_addr; - if (num_of_slaves < 1) + if (num_of_members < 1) return num_tx_total; - memcpy(slaves, internals->tlb_slaves_order, - sizeof(internals->tlb_slaves_order[0]) * num_of_slaves); + memcpy(members, internals->tlb_members_order, + sizeof(internals->tlb_members_order[0]) * num_of_members); - rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr); + rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_member_addr); if (nb_pkts > 3) { for (i = 0; i < 3; i++) rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*)); } - for (i = 0; i < num_of_slaves; i++) { - rte_eth_macaddr_get(slaves[i], &active_slave_addr); + for (i = 0; i < num_of_members; i++) { + rte_eth_macaddr_get(members[i], &active_member_addr); for (j = num_tx_total; j < nb_pkts; j++) { if (j + 3 < nb_pkts) rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*)); @@ -967,17 +970,18 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) ether_hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *); if (rte_is_same_ether_addr(ðer_hdr->src_addr, - &primary_slave_addr)) - rte_ether_addr_copy(&active_slave_addr, + &primary_member_addr)) + rte_ether_addr_copy(&active_member_addr, ðer_hdr->src_addr); #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) - mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX); + mode6_debug("TX IPv4:", ether_hdr, members[i], + &burst_number_TX); #endif } - num_tx_prep = rte_eth_tx_prepare(slaves[i], bd_tx_q->queue_id, + num_tx_prep = rte_eth_tx_prepare(members[i], bd_tx_q->queue_id, bufs + num_tx_total, nb_pkts - num_tx_total); - num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, + num_tx_total += rte_eth_tx_burst(members[i], bd_tx_q->queue_id, bufs + num_tx_total, num_tx_prep); if (num_tx_total == nb_pkts) @@ -990,13 +994,13 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) void bond_tlb_disable(struct bond_dev_private *internals) { - rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals); + rte_eal_alarm_cancel(bond_ethdev_update_tlb_member_cb, internals); } void bond_tlb_enable(struct bond_dev_private *internals) { - bond_ethdev_update_tlb_slave_cb(internals); + bond_ethdev_update_tlb_member_cb(internals); } static uint16_t @@ -1011,11 +1015,11 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) struct client_data *client_info; /* - * We create transmit buffers for every 
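/*
 * Illustrative sketch, not part of the patch: the TLB update callback above
 * ranks members by how much of their link bandwidth is still unused and keeps
 * that order for the Tx path, so the least loaded member is tried first. This
 * standalone example sorts a few hypothetical members the same way (largest
 * remaining share first), mirroring what bandwidth_cmp() does.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct bwg_example {
	uint64_t left;	/* remaining bandwidth share, arbitrary units */
	uint16_t port;
};

static int
bwg_example_cmp(const void *a, const void *b)
{
	const struct bwg_example *x = a;
	const struct bwg_example *y = b;

	/* Descending: the member with the most bandwidth left sorts first. */
	return (y->left > x->left) - (y->left < x->left);
}

int main(void)
{
	struct bwg_example order[] = {
		{ .left = 40, .port = 1 },
		{ .left = 90, .port = 2 },
		{ .left = 10, .port = 3 },
	};
	unsigned int i;

	qsort(order, 3, sizeof(order[0]), bwg_example_cmp);
	for (i = 0; i < 3; i++)
		printf("tx preference %u -> port %u\n", i, order[i].port);
	return 0;
}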
slave and one additional to send + * We create transmit buffers for every member and one additional to send * through tlb. In worst case every packet will be send on one port. */ - struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts]; - uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 }; + struct rte_mbuf *member_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts]; + uint16_t member_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 }; /* * We create separate transmit buffers for update packets as they won't @@ -1029,7 +1033,7 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) uint16_t num_send, num_not_send = 0; uint16_t num_tx_total = 0; - uint16_t slave_idx; + uint16_t member_idx; int i, j; @@ -1040,19 +1044,19 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) offset = get_vlan_offset(eth_h, ðer_type); if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) { - slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals); + member_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals); /* Change src mac in eth header */ - rte_eth_macaddr_get(slave_idx, ð_h->src_addr); + rte_eth_macaddr_get(member_idx, ð_h->src_addr); - /* Add packet to slave tx buffer */ - slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i]; - slave_bufs_pkts[slave_idx]++; + /* Add packet to member tx buffer */ + member_bufs[member_idx][member_bufs_pkts[member_idx]] = bufs[i]; + member_bufs_pkts[member_idx]++; } else { /* If packet is not ARP, send it with TLB policy */ - slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] = + member_bufs[RTE_MAX_ETHPORTS][member_bufs_pkts[RTE_MAX_ETHPORTS]] = bufs[i]; - slave_bufs_pkts[RTE_MAX_ETHPORTS]++; + member_bufs_pkts[RTE_MAX_ETHPORTS]++; } } @@ -1062,7 +1066,7 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) client_info = &internals->mode6.client_table[i]; if (client_info->in_use) { - /* Allocate new packet to send ARP update on current slave */ + /* Allocate new packet to send ARP update on current member */ upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool); if (upd_pkt == NULL) { RTE_BOND_LOG(ERR, @@ -1076,44 +1080,44 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) upd_pkt->data_len = pkt_size; upd_pkt->pkt_len = pkt_size; - slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt, + member_idx = bond_mode_alb_arp_upd(client_info, upd_pkt, internals); /* Add packet to update tx buffer */ - update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt; - update_bufs_pkts[slave_idx]++; + update_bufs[member_idx][update_bufs_pkts[member_idx]] = upd_pkt; + update_bufs_pkts[member_idx]++; } } internals->mode6.ntt = 0; } - /* Send ARP packets on proper slaves */ + /* Send ARP packets on proper members */ for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - if (slave_bufs_pkts[i] > 0) { + if (member_bufs_pkts[i] > 0) { num_send = rte_eth_tx_prepare(i, bd_tx_q->queue_id, - slave_bufs[i], slave_bufs_pkts[i]); + member_bufs[i], member_bufs_pkts[i]); num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, - slave_bufs[i], num_send); - for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) { + member_bufs[i], num_send); + for (j = 0; j < member_bufs_pkts[i] - num_send; j++) { bufs[nb_pkts - 1 - num_not_send - j] = - slave_bufs[i][nb_pkts - 1 - j]; + member_bufs[i][nb_pkts - 1 - j]; } num_tx_total += num_send; - num_not_send += slave_bufs_pkts[i] - num_send; + num_not_send += member_bufs_pkts[i] - num_send; #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || 
defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) /* Print TX stats including update packets */ - for (j = 0; j < slave_bufs_pkts[i]; j++) { - eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], + for (j = 0; j < member_bufs_pkts[i]; j++) { + eth_h = rte_pktmbuf_mtod(member_bufs[i][j], struct rte_ether_hdr *); - mode6_debug("TX ARP:", eth_h, i, &burstnumberTX); + mode6_debug("TX ARP:", eth_h, i, &burst_number_TX); } #endif } } - /* Send update packets on proper slaves */ + /* Send update packets on proper members */ for (i = 0; i < RTE_MAX_ETHPORTS; i++) { if (update_bufs_pkts[i] > 0) { num_send = rte_eth_tx_prepare(i, bd_tx_q->queue_id, @@ -1127,21 +1131,21 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) for (j = 0; j < update_bufs_pkts[i]; j++) { eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct rte_ether_hdr *); - mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX); + mode6_debug("TX ARPupd:", eth_h, i, &burst_number_TX); } #endif } } /* Send non-ARP packets using tlb policy */ - if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) { + if (member_bufs_pkts[RTE_MAX_ETHPORTS] > 0) { num_send = bond_ethdev_tx_burst_tlb(queue, - slave_bufs[RTE_MAX_ETHPORTS], - slave_bufs_pkts[RTE_MAX_ETHPORTS]); + member_bufs[RTE_MAX_ETHPORTS], + member_bufs_pkts[RTE_MAX_ETHPORTS]); - for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) { + for (j = 0; j < member_bufs_pkts[RTE_MAX_ETHPORTS]; j++) { bufs[nb_pkts - 1 - num_not_send - j] = - slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j]; + member_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j]; } num_tx_total += num_send; @@ -1152,59 +1156,59 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) static inline uint16_t tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs, - uint16_t *slave_port_ids, uint16_t slave_count) + uint16_t *member_port_ids, uint16_t member_count) { struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; struct bond_dev_private *internals = bd_tx_q->dev_private; - /* Array to sort mbufs for transmission on each slave into */ - struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs]; - /* Number of mbufs for transmission on each slave */ - uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 }; - /* Mapping array generated by hash function to map mbufs to slaves */ - uint16_t bufs_slave_port_idxs[nb_bufs]; + /* Array to sort mbufs for transmission on each member into */ + struct rte_mbuf *member_bufs[RTE_MAX_ETHPORTS][nb_bufs]; + /* Number of mbufs for transmission on each member */ + uint16_t member_nb_bufs[RTE_MAX_ETHPORTS] = { 0 }; + /* Mapping array generated by hash function to map mbufs to members */ + uint16_t bufs_member_port_idxs[nb_bufs]; - uint16_t slave_tx_count; + uint16_t member_tx_count; uint16_t total_tx_count = 0, total_tx_fail_count = 0; uint16_t i; /* - * Populate slaves mbuf with the packets which are to be sent on it - * selecting output slave using hash based on xmit policy + * Populate members mbuf with the packets which are to be sent on it + * selecting output member using hash based on xmit policy */ - internals->burst_xmit_hash(bufs, nb_bufs, slave_count, - bufs_slave_port_idxs); + internals->burst_xmit_hash(bufs, nb_bufs, member_count, + bufs_member_port_idxs); for (i = 0; i < nb_bufs; i++) { - /* Populate slave mbuf arrays with mbufs for that slave. */ - uint16_t slave_idx = bufs_slave_port_idxs[i]; + /* Populate member mbuf arrays with mbufs for that member. 
*/ + uint16_t member_idx = bufs_member_port_idxs[i]; - slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i]; + member_bufs[member_idx][member_nb_bufs[member_idx]++] = bufs[i]; } - /* Send packet burst on each slave device */ - for (i = 0; i < slave_count; i++) { - if (slave_nb_bufs[i] == 0) + /* Send packet burst on each member device */ + for (i = 0; i < member_count; i++) { + if (member_nb_bufs[i] == 0) continue; - slave_tx_count = rte_eth_tx_prepare(slave_port_ids[i], - bd_tx_q->queue_id, slave_bufs[i], - slave_nb_bufs[i]); - slave_tx_count = rte_eth_tx_burst(slave_port_ids[i], - bd_tx_q->queue_id, slave_bufs[i], - slave_tx_count); + member_tx_count = rte_eth_tx_prepare(member_port_ids[i], + bd_tx_q->queue_id, member_bufs[i], + member_nb_bufs[i]); + member_tx_count = rte_eth_tx_burst(member_port_ids[i], + bd_tx_q->queue_id, member_bufs[i], + member_tx_count); - total_tx_count += slave_tx_count; + total_tx_count += member_tx_count; /* If tx burst fails move packets to end of bufs */ - if (unlikely(slave_tx_count < slave_nb_bufs[i])) { - int slave_tx_fail_count = slave_nb_bufs[i] - - slave_tx_count; - total_tx_fail_count += slave_tx_fail_count; + if (unlikely(member_tx_count < member_nb_bufs[i])) { + int member_tx_fail_count = member_nb_bufs[i] - + member_tx_count; + total_tx_fail_count += member_tx_fail_count; memcpy(&bufs[nb_bufs - total_tx_fail_count], - &slave_bufs[i][slave_tx_count], - slave_tx_fail_count * sizeof(bufs[0])); + &member_bufs[i][member_tx_count], + member_tx_fail_count * sizeof(bufs[0])); } } @@ -1218,23 +1222,23 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs, struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; struct bond_dev_private *internals = bd_tx_q->dev_private; - uint16_t slave_port_ids[RTE_MAX_ETHPORTS]; - uint16_t slave_count; + uint16_t member_port_ids[RTE_MAX_ETHPORTS]; + uint16_t member_count; if (unlikely(nb_bufs == 0)) return 0; - /* Copy slave list to protect against slave up/down changes during tx + /* Copy member list to protect against member up/down changes during tx * bursting */ - slave_count = internals->active_slave_count; - if (unlikely(slave_count < 1)) + member_count = internals->active_member_count; + if (unlikely(member_count < 1)) return 0; - memcpy(slave_port_ids, internals->active_slaves, - sizeof(slave_port_ids[0]) * slave_count); - return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids, - slave_count); + memcpy(member_port_ids, internals->active_members, + sizeof(member_port_ids[0]) * member_count); + return tx_burst_balance(queue, bufs, nb_bufs, member_port_ids, + member_count); } static inline uint16_t @@ -1244,31 +1248,31 @@ tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs, struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; struct bond_dev_private *internals = bd_tx_q->dev_private; - uint16_t slave_port_ids[RTE_MAX_ETHPORTS]; - uint16_t slave_count; + uint16_t member_port_ids[RTE_MAX_ETHPORTS]; + uint16_t member_count; - uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS]; - uint16_t dist_slave_count; + uint16_t dist_member_port_ids[RTE_MAX_ETHPORTS]; + uint16_t dist_member_count; - uint16_t slave_tx_count; + uint16_t member_tx_count; uint16_t i; - /* Copy slave list to protect against slave up/down changes during tx + /* Copy member list to protect against member up/down changes during tx * bursting */ - slave_count = internals->active_slave_count; - if (unlikely(slave_count < 1)) + member_count = internals->active_member_count; + if (unlikely(member_count < 
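/*
 * Illustrative sketch, not part of the patch: the overall shape of the
 * balance Tx path above. Packets are first bucketed per member using the
 * configured xmit hash, each bucket is burst on its member, and anything a
 * member could not send is copied back to the tail of the caller's array so
 * the application can retry it. The helper and the pick_member callback are
 * hypothetical stand-ins for tx_burst_balance() and the burst_xmit_*_hash
 * policies.
 */
#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t
tx_balance_sketch(const uint16_t *members, uint16_t member_count,
		  uint16_t queue_id, struct rte_mbuf **bufs, uint16_t nb_bufs,
		  uint16_t (*pick_member)(const struct rte_mbuf *, uint16_t))
{
	uint16_t sent = 0, failed = 0;
	uint16_t i;

	if (nb_bufs == 0)
		return 0;

	struct rte_mbuf *bucket[RTE_MAX_ETHPORTS][nb_bufs];
	uint16_t bucket_len[RTE_MAX_ETHPORTS] = { 0 };

	/* Sort mbufs into one bucket per member, driven by the hash policy. */
	for (i = 0; i < nb_bufs; i++) {
		uint16_t m = pick_member(bufs[i], member_count);

		bucket[m][bucket_len[m]++] = bufs[i];
	}

	/* Burst each bucket; unsent mbufs move to the end of bufs[]. */
	for (i = 0; i < member_count; i++) {
		uint16_t n;

		if (bucket_len[i] == 0)
			continue;
		n = rte_eth_tx_burst(members[i], queue_id,
				     bucket[i], bucket_len[i]);
		sent += n;
		if (n < bucket_len[i]) {
			uint16_t left = bucket_len[i] - n;

			failed += left;
			memcpy(&bufs[nb_bufs - failed], &bucket[i][n],
			       left * sizeof(bufs[0]));
		}
	}
	return sent;
}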
1)) return 0; - memcpy(slave_port_ids, internals->active_slaves, - sizeof(slave_port_ids[0]) * slave_count); + memcpy(member_port_ids, internals->active_members, + sizeof(member_port_ids[0]) * member_count); if (dedicated_txq) goto skip_tx_ring; /* Check for LACP control packets and send if available */ - for (i = 0; i < slave_count; i++) { - struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]]; + for (i = 0; i < member_count; i++) { + struct port *port = &bond_mode_8023ad_ports[member_port_ids[i]]; struct rte_mbuf *ctrl_pkt = NULL; if (likely(rte_ring_empty(port->tx_ring))) @@ -1276,15 +1280,15 @@ tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs, if (rte_ring_dequeue(port->tx_ring, (void **)&ctrl_pkt) != -ENOENT) { - slave_tx_count = rte_eth_tx_prepare(slave_port_ids[i], + member_tx_count = rte_eth_tx_prepare(member_port_ids[i], bd_tx_q->queue_id, &ctrl_pkt, 1); - slave_tx_count = rte_eth_tx_burst(slave_port_ids[i], - bd_tx_q->queue_id, &ctrl_pkt, slave_tx_count); + member_tx_count = rte_eth_tx_burst(member_port_ids[i], + bd_tx_q->queue_id, &ctrl_pkt, member_tx_count); /* * re-enqueue LAG control plane packets to buffering * ring if transmission fails so the packet isn't lost. */ - if (slave_tx_count != 1) + if (member_tx_count != 1) rte_ring_enqueue(port->tx_ring, ctrl_pkt); } } @@ -1293,20 +1297,20 @@ tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs, if (unlikely(nb_bufs == 0)) return 0; - dist_slave_count = 0; - for (i = 0; i < slave_count; i++) { - struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]]; + dist_member_count = 0; + for (i = 0; i < member_count; i++) { + struct port *port = &bond_mode_8023ad_ports[member_port_ids[i]]; if (ACTOR_STATE(port, DISTRIBUTING)) - dist_slave_port_ids[dist_slave_count++] = - slave_port_ids[i]; + dist_member_port_ids[dist_member_count++] = + member_port_ids[i]; } - if (unlikely(dist_slave_count < 1)) + if (unlikely(dist_member_count < 1)) return 0; - return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids, - dist_slave_count); + return tx_burst_balance(queue, bufs, nb_bufs, dist_member_port_ids, + dist_member_count); } static uint16_t @@ -1330,78 +1334,78 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs, struct bond_dev_private *internals; struct bond_tx_queue *bd_tx_q; - uint16_t slaves[RTE_MAX_ETHPORTS]; + uint16_t members[RTE_MAX_ETHPORTS]; uint8_t tx_failed_flag = 0; - uint16_t num_of_slaves; + uint16_t num_of_members; uint16_t max_nb_of_tx_pkts = 0; - int slave_tx_total[RTE_MAX_ETHPORTS]; - int i, most_successful_tx_slave = -1; + int member_tx_total[RTE_MAX_ETHPORTS]; + int i, most_successful_tx_member = -1; bd_tx_q = (struct bond_tx_queue *)queue; internals = bd_tx_q->dev_private; - /* Copy slave list to protect against slave up/down changes during tx + /* Copy member list to protect against member up/down changes during tx * bursting */ - num_of_slaves = internals->active_slave_count; - memcpy(slaves, internals->active_slaves, - sizeof(internals->active_slaves[0]) * num_of_slaves); + num_of_members = internals->active_member_count; + memcpy(members, internals->active_members, + sizeof(internals->active_members[0]) * num_of_members); - if (num_of_slaves < 1) + if (num_of_members < 1) return 0; /* It is rare that bond different PMDs together, so just call tx-prepare once */ - nb_pkts = rte_eth_tx_prepare(slaves[0], bd_tx_q->queue_id, bufs, nb_pkts); + nb_pkts = rte_eth_tx_prepare(members[0], bd_tx_q->queue_id, bufs, nb_pkts); /* Increment reference 
count on mbufs */ for (i = 0; i < nb_pkts; i++) - rte_pktmbuf_refcnt_update(bufs[i], num_of_slaves - 1); + rte_pktmbuf_refcnt_update(bufs[i], num_of_members - 1); - /* Transmit burst on each active slave */ - for (i = 0; i < num_of_slaves; i++) { - slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, + /* Transmit burst on each active member */ + for (i = 0; i < num_of_members; i++) { + member_tx_total[i] = rte_eth_tx_burst(members[i], bd_tx_q->queue_id, bufs, nb_pkts); - if (unlikely(slave_tx_total[i] < nb_pkts)) + if (unlikely(member_tx_total[i] < nb_pkts)) tx_failed_flag = 1; - /* record the value and slave index for the slave which transmits the + /* record the value and member index for the member which transmits the * maximum number of packets */ - if (slave_tx_total[i] > max_nb_of_tx_pkts) { - max_nb_of_tx_pkts = slave_tx_total[i]; - most_successful_tx_slave = i; + if (member_tx_total[i] > max_nb_of_tx_pkts) { + max_nb_of_tx_pkts = member_tx_total[i]; + most_successful_tx_member = i; } } - /* if slaves fail to transmit packets from burst, the calling application + /* if members fail to transmit packets from burst, the calling application * is not expected to know about multiple references to packets so we must - * handle failures of all packets except those of the most successful slave + * handle failures of all packets except those of the most successful member */ if (unlikely(tx_failed_flag)) - for (i = 0; i < num_of_slaves; i++) - if (i != most_successful_tx_slave) - while (slave_tx_total[i] < nb_pkts) - rte_pktmbuf_free(bufs[slave_tx_total[i]++]); + for (i = 0; i < num_of_members; i++) + if (i != most_successful_tx_member) + while (member_tx_total[i] < nb_pkts) + rte_pktmbuf_free(bufs[member_tx_total[i]++]); return max_nb_of_tx_pkts; } static void -link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link) +link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *member_link) { struct bond_dev_private *bond_ctx = ethdev->data->dev_private; if (bond_ctx->mode == BONDING_MODE_8023AD) { /** * If in mode 4 then save the link properties of the first - * slave, all subsequent slaves must match these properties + * member, all subsequent members must match these properties */ - struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link; + struct rte_eth_link *bond_link = &bond_ctx->mode4.member_link; - bond_link->link_autoneg = slave_link->link_autoneg; - bond_link->link_duplex = slave_link->link_duplex; - bond_link->link_speed = slave_link->link_speed; + bond_link->link_autoneg = member_link->link_autoneg; + bond_link->link_duplex = member_link->link_duplex; + bond_link->link_speed = member_link->link_speed; } else { /** * In any other mode the link properties are set to default @@ -1414,16 +1418,16 @@ link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link) static int link_properties_valid(struct rte_eth_dev *ethdev, - struct rte_eth_link *slave_link) + struct rte_eth_link *member_link) { struct bond_dev_private *bond_ctx = ethdev->data->dev_private; if (bond_ctx->mode == BONDING_MODE_8023AD) { - struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link; + struct rte_eth_link *bond_link = &bond_ctx->mode4.member_link; - if (bond_link->link_duplex != slave_link->link_duplex || - bond_link->link_autoneg != slave_link->link_autoneg || - bond_link->link_speed != slave_link->link_speed) + if (bond_link->link_duplex != member_link->link_duplex || + bond_link->link_autoneg != member_link->link_autoneg || + 
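/*
 * Illustrative sketch, not part of the patch: why broadcast mode bumps mbuf
 * reference counts before transmitting. Every active member sends the same
 * mbuf, so each packet needs member_count references in total; the original
 * reference covers one member and member_count - 1 extra references are added
 * up front. A copy that some member failed to send can then be freed without
 * touching the data still queued on the other members. The helper name is
 * hypothetical; rte_pktmbuf_refcnt_update() is the real DPDK call.
 */
#include <stdint.h>
#include <rte_mbuf.h>

static void
broadcast_take_refs(struct rte_mbuf **bufs, uint16_t nb_pkts,
		    uint16_t member_count)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++)
		rte_pktmbuf_refcnt_update(bufs[i], member_count - 1);
}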
bond_link->link_speed != member_link->link_speed) return -1; } @@ -1480,26 +1484,26 @@ mac_address_set(struct rte_eth_dev *eth_dev, static const struct rte_ether_addr null_mac_addr; /* - * Add additional MAC addresses to the slave + * Add additional MAC addresses to the member */ int -slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev, - uint16_t slave_port_id) +member_add_mac_addresses(struct rte_eth_dev *bonding_eth_dev, + uint16_t member_port_id) { int i, ret; struct rte_ether_addr *mac_addr; for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) { - mac_addr = &bonded_eth_dev->data->mac_addrs[i]; + mac_addr = &bonding_eth_dev->data->mac_addrs[i]; if (rte_is_same_ether_addr(mac_addr, &null_mac_addr)) break; - ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0); + ret = rte_eth_dev_mac_addr_add(member_port_id, mac_addr, 0); if (ret < 0) { /* rollback */ for (i--; i > 0; i--) - rte_eth_dev_mac_addr_remove(slave_port_id, - &bonded_eth_dev->data->mac_addrs[i]); + rte_eth_dev_mac_addr_remove(member_port_id, + &bonding_eth_dev->data->mac_addrs[i]); return ret; } } @@ -1508,22 +1512,22 @@ slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev, } /* - * Remove additional MAC addresses from the slave + * Remove additional MAC addresses from the member */ int -slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev, - uint16_t slave_port_id) +member_remove_mac_addresses(struct rte_eth_dev *bonding_eth_dev, + uint16_t member_port_id) { int i, rc, ret; struct rte_ether_addr *mac_addr; rc = 0; for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) { - mac_addr = &bonded_eth_dev->data->mac_addrs[i]; + mac_addr = &bonding_eth_dev->data->mac_addrs[i]; if (rte_is_same_ether_addr(mac_addr, &null_mac_addr)) break; - ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr); + ret = rte_eth_dev_mac_addr_remove(member_port_id, mac_addr); /* save only the first error */ if (ret < 0 && rc == 0) rc = ret; @@ -1533,54 +1537,54 @@ slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev, } int -mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev) +mac_address_members_update(struct rte_eth_dev *bonding_eth_dev) { - struct bond_dev_private *internals = bonded_eth_dev->data->dev_private; + struct bond_dev_private *internals = bonding_eth_dev->data->dev_private; bool set; int i; - /* Update slave devices MAC addresses */ - if (internals->slave_count < 1) + /* Update member devices MAC addresses */ + if (internals->member_count < 1) return -1; switch (internals->mode) { case BONDING_MODE_ROUND_ROBIN: case BONDING_MODE_BALANCE: case BONDING_MODE_BROADCAST: - for (i = 0; i < internals->slave_count; i++) { + for (i = 0; i < internals->member_count; i++) { if (rte_eth_dev_default_mac_addr_set( - internals->slaves[i].port_id, - bonded_eth_dev->data->mac_addrs)) { + internals->members[i].port_id, + bonding_eth_dev->data->mac_addrs)) { RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address", - internals->slaves[i].port_id); + internals->members[i].port_id); return -1; } } break; case BONDING_MODE_8023AD: - bond_mode_8023ad_mac_address_update(bonded_eth_dev); + bond_mode_8023ad_mac_address_update(bonding_eth_dev); break; case BONDING_MODE_ACTIVE_BACKUP: case BONDING_MODE_TLB: case BONDING_MODE_ALB: default: set = true; - for (i = 0; i < internals->slave_count; i++) { - if (internals->slaves[i].port_id == + for (i = 0; i < internals->member_count; i++) { + if (internals->members[i].port_id == internals->current_primary_port) { if (rte_eth_dev_default_mac_addr_set( internals->current_primary_port, - 
bonded_eth_dev->data->mac_addrs)) { + bonding_eth_dev->data->mac_addrs)) { RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address", internals->current_primary_port); set = false; } } else { if (rte_eth_dev_default_mac_addr_set( - internals->slaves[i].port_id, - &internals->slaves[i].persisted_mac_addr)) { + internals->members[i].port_id, + &internals->members[i].persisted_mac_addr)) { RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address", - internals->slaves[i].port_id); + internals->members[i].port_id); } } } @@ -1655,55 +1659,55 @@ bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, uint8_t mode) static int -slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev, - struct rte_eth_dev *slave_eth_dev) +member_configure_slow_queue(struct rte_eth_dev *bonding_eth_dev, + struct rte_eth_dev *member_eth_dev) { int errval = 0; - struct bond_dev_private *internals = bonded_eth_dev->data->dev_private; - struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id]; + struct bond_dev_private *internals = bonding_eth_dev->data->dev_private; + struct port *port = &bond_mode_8023ad_ports[member_eth_dev->data->port_id]; if (port->slow_pool == NULL) { char mem_name[256]; - int slave_id = slave_eth_dev->data->port_id; + int member_id = member_eth_dev->data->port_id; - snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool", - slave_id); + snprintf(mem_name, RTE_DIM(mem_name), "member_port%u_slow_pool", + member_id); port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191, 250, 0, RTE_MBUF_DEFAULT_BUF_SIZE, - slave_eth_dev->data->numa_node); + member_eth_dev->data->numa_node); /* Any memory allocation failure in initialization is critical because * resources can't be free, so reinitialization is impossible. */ if (port->slow_pool == NULL) { - rte_panic("Slave %u: Failed to create memory pool '%s': %s\n", - slave_id, mem_name, rte_strerror(rte_errno)); + rte_panic("Member %u: Failed to create memory pool '%s': %s\n", + member_id, mem_name, rte_strerror(rte_errno)); } } if (internals->mode4.dedicated_queues.enabled == 1) { /* Configure slow Rx queue */ - errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, + errval = rte_eth_rx_queue_setup(member_eth_dev->data->port_id, internals->mode4.dedicated_queues.rx_qid, 128, - rte_eth_dev_socket_id(slave_eth_dev->data->port_id), + rte_eth_dev_socket_id(member_eth_dev->data->port_id), NULL, port->slow_pool); if (errval != 0) { RTE_BOND_LOG(ERR, "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)", - slave_eth_dev->data->port_id, + member_eth_dev->data->port_id, internals->mode4.dedicated_queues.rx_qid, errval); return errval; } - errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, + errval = rte_eth_tx_queue_setup(member_eth_dev->data->port_id, internals->mode4.dedicated_queues.tx_qid, 512, - rte_eth_dev_socket_id(slave_eth_dev->data->port_id), + rte_eth_dev_socket_id(member_eth_dev->data->port_id), NULL); if (errval != 0) { RTE_BOND_LOG(ERR, "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)", - slave_eth_dev->data->port_id, + member_eth_dev->data->port_id, internals->mode4.dedicated_queues.tx_qid, errval); return errval; @@ -1713,59 +1717,59 @@ slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev, } int -slave_configure(struct rte_eth_dev *bonded_eth_dev, - struct rte_eth_dev *slave_eth_dev) +member_configure(struct rte_eth_dev *bonding_eth_dev, + struct rte_eth_dev *member_eth_dev) { uint16_t nb_rx_queues; uint16_t nb_tx_queues; int errval; - struct bond_dev_private *internals = 
bonded_eth_dev->data->dev_private; + struct bond_dev_private *internals = bonding_eth_dev->data->dev_private; - /* Stop slave */ - errval = rte_eth_dev_stop(slave_eth_dev->data->port_id); + /* Stop member */ + errval = rte_eth_dev_stop(member_eth_dev->data->port_id); if (errval != 0) RTE_BOND_LOG(ERR, "rte_eth_dev_stop: port %u, err (%d)", - slave_eth_dev->data->port_id, errval); + member_eth_dev->data->port_id, errval); - /* Enable interrupts on slave device if supported */ - if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) - slave_eth_dev->data->dev_conf.intr_conf.lsc = 1; + /* Enable interrupts on member device if supported */ + if (member_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) + member_eth_dev->data->dev_conf.intr_conf.lsc = 1; - /* If RSS is enabled for bonding, try to enable it for slaves */ - if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { - /* rss_key won't be empty if RSS is configured in bonded dev */ - slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = + /* If RSS is enabled for bonding, try to enable it for members */ + if (bonding_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { + /* rss_key won't be empty if RSS is configured in bonding dev */ + member_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = internals->rss_key_len; - slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = + member_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key; - slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = - bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; - slave_eth_dev->data->dev_conf.rxmode.mq_mode = - bonded_eth_dev->data->dev_conf.rxmode.mq_mode; + member_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = + bonding_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + member_eth_dev->data->dev_conf.rxmode.mq_mode = + bonding_eth_dev->data->dev_conf.rxmode.mq_mode; } else { - slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0; - slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; - slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; - slave_eth_dev->data->dev_conf.rxmode.mq_mode = - bonded_eth_dev->data->dev_conf.rxmode.mq_mode; + member_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0; + member_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; + member_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; + member_eth_dev->data->dev_conf.rxmode.mq_mode = + bonding_eth_dev->data->dev_conf.rxmode.mq_mode; } - slave_eth_dev->data->dev_conf.rxmode.mtu = - bonded_eth_dev->data->dev_conf.rxmode.mtu; - slave_eth_dev->data->dev_conf.link_speeds = - bonded_eth_dev->data->dev_conf.link_speeds; + member_eth_dev->data->dev_conf.rxmode.mtu = + bonding_eth_dev->data->dev_conf.rxmode.mtu; + member_eth_dev->data->dev_conf.link_speeds = + bonding_eth_dev->data->dev_conf.link_speeds; - slave_eth_dev->data->dev_conf.txmode.offloads = - bonded_eth_dev->data->dev_conf.txmode.offloads; + member_eth_dev->data->dev_conf.txmode.offloads = + bonding_eth_dev->data->dev_conf.txmode.offloads; - slave_eth_dev->data->dev_conf.rxmode.offloads = - bonded_eth_dev->data->dev_conf.rxmode.offloads; + member_eth_dev->data->dev_conf.rxmode.offloads = + bonding_eth_dev->data->dev_conf.rxmode.offloads; - nb_rx_queues = bonded_eth_dev->data->nb_rx_queues; - nb_tx_queues = bonded_eth_dev->data->nb_tx_queues; + nb_rx_queues = bonding_eth_dev->data->nb_rx_queues; + nb_tx_queues = bonding_eth_dev->data->nb_tx_queues; if 
(internals->mode == BONDING_MODE_8023AD) { if (internals->mode4.dedicated_queues.enabled == 1) { @@ -1775,141 +1779,142 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, } /* Configure device */ - errval = rte_eth_dev_configure(slave_eth_dev->data->port_id, + errval = rte_eth_dev_configure(member_eth_dev->data->port_id, nb_rx_queues, nb_tx_queues, - &(slave_eth_dev->data->dev_conf)); + &member_eth_dev->data->dev_conf); if (errval != 0) { - RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)", - slave_eth_dev->data->port_id, errval); + RTE_BOND_LOG(ERR, "Cannot configure member device: port %u, err (%d)", + member_eth_dev->data->port_id, errval); return errval; } - errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id, - bonded_eth_dev->data->mtu); + errval = rte_eth_dev_set_mtu(member_eth_dev->data->port_id, + bonding_eth_dev->data->mtu); if (errval != 0 && errval != -ENOTSUP) { RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)", - slave_eth_dev->data->port_id, errval); + member_eth_dev->data->port_id, errval); return errval; } return 0; } int -slave_start(struct rte_eth_dev *bonded_eth_dev, - struct rte_eth_dev *slave_eth_dev) +member_start(struct rte_eth_dev *bonding_eth_dev, + struct rte_eth_dev *member_eth_dev) { int errval = 0; struct bond_rx_queue *bd_rx_q; struct bond_tx_queue *bd_tx_q; uint16_t q_id; struct rte_flow_error flow_error; - struct bond_dev_private *internals = bonded_eth_dev->data->dev_private; + struct bond_dev_private *internals = bonding_eth_dev->data->dev_private; + uint16_t member_port_id = member_eth_dev->data->port_id; /* Setup Rx Queues */ - for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) { - bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id]; + for (q_id = 0; q_id < bonding_eth_dev->data->nb_rx_queues; q_id++) { + bd_rx_q = (struct bond_rx_queue *)bonding_eth_dev->data->rx_queues[q_id]; - errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id, + errval = rte_eth_rx_queue_setup(member_port_id, q_id, bd_rx_q->nb_rx_desc, - rte_eth_dev_socket_id(slave_eth_dev->data->port_id), + rte_eth_dev_socket_id(member_port_id), &(bd_rx_q->rx_conf), bd_rx_q->mb_pool); if (errval != 0) { RTE_BOND_LOG(ERR, "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)", - slave_eth_dev->data->port_id, q_id, errval); + member_port_id, q_id, errval); return errval; } } /* Setup Tx Queues */ - for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) { - bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id]; + for (q_id = 0; q_id < bonding_eth_dev->data->nb_tx_queues; q_id++) { + bd_tx_q = (struct bond_tx_queue *)bonding_eth_dev->data->tx_queues[q_id]; - errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id, + errval = rte_eth_tx_queue_setup(member_port_id, q_id, bd_tx_q->nb_tx_desc, - rte_eth_dev_socket_id(slave_eth_dev->data->port_id), + rte_eth_dev_socket_id(member_port_id), &bd_tx_q->tx_conf); if (errval != 0) { RTE_BOND_LOG(ERR, "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)", - slave_eth_dev->data->port_id, q_id, errval); + member_port_id, q_id, errval); return errval; } } if (internals->mode == BONDING_MODE_8023AD && internals->mode4.dedicated_queues.enabled == 1) { - if (slave_configure_slow_queue(bonded_eth_dev, slave_eth_dev) + if (member_configure_slow_queue(bonding_eth_dev, member_eth_dev) != 0) return errval; - errval = bond_ethdev_8023ad_flow_verify(bonded_eth_dev, - slave_eth_dev->data->port_id); + errval = 
bond_ethdev_8023ad_flow_verify(bonding_eth_dev, + member_port_id); if (errval != 0) { RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_verify: port=%d, err (%d)", - slave_eth_dev->data->port_id, errval); + member_port_id, errval); return errval; } - if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL) { - errval = rte_flow_destroy(slave_eth_dev->data->port_id, - internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id], + if (internals->mode4.dedicated_queues.flow[member_port_id] != NULL) { + errval = rte_flow_destroy(member_port_id, + internals->mode4.dedicated_queues.flow[member_port_id], &flow_error); RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_destroy: port=%d, err (%d)", - slave_eth_dev->data->port_id, errval); + member_port_id, errval); } } /* Start device */ - errval = rte_eth_dev_start(slave_eth_dev->data->port_id); + errval = rte_eth_dev_start(member_port_id); if (errval != 0) { RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)", - slave_eth_dev->data->port_id, errval); + member_port_id, errval); return -1; } if (internals->mode == BONDING_MODE_8023AD && internals->mode4.dedicated_queues.enabled == 1) { - errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev, - slave_eth_dev->data->port_id); + errval = bond_ethdev_8023ad_flow_set(bonding_eth_dev, + member_port_id); if (errval != 0) { RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: port=%d, err (%d)", - slave_eth_dev->data->port_id, errval); + member_port_id, errval); return errval; } } /* If RSS is enabled for bonding, synchronize RETA */ - if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) { + if (bonding_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) { int i; struct bond_dev_private *internals; - internals = bonded_eth_dev->data->dev_private; + internals = bonding_eth_dev->data->dev_private; - for (i = 0; i < internals->slave_count; i++) { - if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) { + for (i = 0; i < internals->member_count; i++) { + if (internals->members[i].port_id == member_port_id) { errval = rte_eth_dev_rss_reta_update( - slave_eth_dev->data->port_id, + member_port_id, &internals->reta_conf[0], - internals->slaves[i].reta_size); + internals->members[i].reta_size); if (errval != 0) { RTE_BOND_LOG(WARNING, - "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)." + "rte_eth_dev_rss_reta_update on member port %d fails (err %d)." 
" RSS Configuration for bonding may be inconsistent.", - slave_eth_dev->data->port_id, errval); + member_port_id, errval); } break; } } } - /* If lsc interrupt is set, check initial slave's link status */ - if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) { - slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0); - bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id, - RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id, + /* If lsc interrupt is set, check initial member's link status */ + if (member_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) { + member_eth_dev->dev_ops->link_update(member_eth_dev, 0); + bond_ethdev_lsc_event_callback(member_port_id, + RTE_ETH_EVENT_INTR_LSC, &bonding_eth_dev->data->port_id, NULL); } @@ -1917,75 +1922,74 @@ slave_start(struct rte_eth_dev *bonded_eth_dev, } void -slave_remove(struct bond_dev_private *internals, - struct rte_eth_dev *slave_eth_dev) +member_remove(struct bond_dev_private *internals, + struct rte_eth_dev *member_eth_dev) { uint16_t i; - for (i = 0; i < internals->slave_count; i++) - if (internals->slaves[i].port_id == - slave_eth_dev->data->port_id) + for (i = 0; i < internals->member_count; i++) + if (internals->members[i].port_id == + member_eth_dev->data->port_id) break; - if (i < (internals->slave_count - 1)) { + if (i < (internals->member_count - 1)) { struct rte_flow *flow; - memmove(&internals->slaves[i], &internals->slaves[i + 1], - sizeof(internals->slaves[0]) * - (internals->slave_count - i - 1)); + memmove(&internals->members[i], &internals->members[i + 1], + sizeof(internals->members[0]) * + (internals->member_count - i - 1)); TAILQ_FOREACH(flow, &internals->flow_list, next) { memmove(&flow->flows[i], &flow->flows[i + 1], sizeof(flow->flows[0]) * - (internals->slave_count - i - 1)); - flow->flows[internals->slave_count - 1] = NULL; + (internals->member_count - i - 1)); + flow->flows[internals->member_count - 1] = NULL; } } - internals->slave_count--; + internals->member_count--; - /* force reconfiguration of slave interfaces */ - rte_eth_dev_internal_reset(slave_eth_dev); + /* force reconfiguration of member interfaces */ + rte_eth_dev_internal_reset(member_eth_dev); } static void -bond_ethdev_slave_link_status_change_monitor(void *cb_arg); +bond_ethdev_member_link_status_change_monitor(void *cb_arg); void -slave_add(struct bond_dev_private *internals, - struct rte_eth_dev *slave_eth_dev) +member_add(struct bond_dev_private *internals, + struct rte_eth_dev *member_eth_dev) { - struct bond_slave_details *slave_details = - &internals->slaves[internals->slave_count]; + struct bond_member_details *member_details = + &internals->members[internals->member_count]; - slave_details->port_id = slave_eth_dev->data->port_id; - slave_details->last_link_status = 0; + member_details->port_id = member_eth_dev->data->port_id; + member_details->last_link_status = 0; - /* Mark slave devices that don't support interrupts so we can + /* Mark member devices that don't support interrupts so we can * compensate when we start the bond */ - if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) { - slave_details->link_status_poll_enabled = 1; - } + if (!(member_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) + member_details->link_status_poll_enabled = 1; - slave_details->link_status_wait_to_complete = 0; + member_details->link_status_wait_to_complete = 0; /* clean tlb_last_obytes when adding port for bonding device */ - memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs, + 
memcpy(&member_details->persisted_mac_addr, member_eth_dev->data->mac_addrs, sizeof(struct rte_ether_addr)); } void bond_ethdev_primary_set(struct bond_dev_private *internals, - uint16_t slave_port_id) + uint16_t member_port_id) { int i; - if (internals->active_slave_count < 1) - internals->current_primary_port = slave_port_id; + if (internals->active_member_count < 1) + internals->current_primary_port = member_port_id; else - /* Search bonded device slave ports for new proposed primary port */ - for (i = 0; i < internals->active_slave_count; i++) { - if (internals->active_slaves[i] == slave_port_id) - internals->current_primary_port = slave_port_id; + /* Search bonding device member ports for new proposed primary port */ + for (i = 0; i < internals->active_member_count; i++) { + if (internals->active_members[i] == member_port_id) + internals->current_primary_port = member_port_id; } } @@ -1998,9 +2002,9 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) struct bond_dev_private *internals; int i; - /* slave eth dev will be started by bonded device */ - if (check_for_bonded_ethdev(eth_dev)) { - RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)", + /* member eth dev will be started by bonding device */ + if (check_for_bonding_ethdev(eth_dev)) { + RTE_BOND_LOG(ERR, "User tried to explicitly start a member eth_dev (%d)", eth_dev->data->port_id); return -1; } @@ -2010,23 +2014,23 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) internals = eth_dev->data->dev_private; - if (internals->slave_count == 0) { - RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices"); + if (internals->member_count == 0) { + RTE_BOND_LOG(ERR, "Cannot start port since there are no member devices"); goto out_err; } if (internals->user_defined_mac == 0) { struct rte_ether_addr *new_mac_addr = NULL; - for (i = 0; i < internals->slave_count; i++) - if (internals->slaves[i].port_id == internals->primary_port) - new_mac_addr = &internals->slaves[i].persisted_mac_addr; + for (i = 0; i < internals->member_count; i++) + if (internals->members[i].port_id == internals->primary_port) + new_mac_addr = &internals->members[i].persisted_mac_addr; if (new_mac_addr == NULL) goto out_err; if (mac_address_set(eth_dev, new_mac_addr) != 0) { - RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address", + RTE_BOND_LOG(ERR, "bonding port (%d) failed to update MAC address", eth_dev->data->port_id); goto out_err; } @@ -2042,28 +2046,28 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) } - /* Reconfigure each slave device if starting bonded device */ - for (i = 0; i < internals->slave_count; i++) { - struct rte_eth_dev *slave_ethdev = - &(rte_eth_devices[internals->slaves[i].port_id]); - if (slave_configure(eth_dev, slave_ethdev) != 0) { + /* Reconfigure each member device if starting bonding device */ + for (i = 0; i < internals->member_count; i++) { + struct rte_eth_dev *member_ethdev = + &(rte_eth_devices[internals->members[i].port_id]); + if (member_configure(eth_dev, member_ethdev) != 0) { RTE_BOND_LOG(ERR, - "bonded port (%d) failed to reconfigure slave device (%d)", + "bonding port (%d) failed to reconfigure member device (%d)", eth_dev->data->port_id, - internals->slaves[i].port_id); + internals->members[i].port_id); goto out_err; } - if (slave_start(eth_dev, slave_ethdev) != 0) { + if (member_start(eth_dev, member_ethdev) != 0) { RTE_BOND_LOG(ERR, - "bonded port (%d) failed to start slave device (%d)", + "bonding port (%d) failed to start member device (%d)", eth_dev->data->port_id, - 
internals->slaves[i].port_id); + internals->members[i].port_id); goto out_err; } - /* We will need to poll for link status if any slave doesn't + /* We will need to poll for link status if any member doesn't * support interrupts */ - if (internals->slaves[i].link_status_poll_enabled) + if (internals->members[i].link_status_poll_enabled) internals->link_status_polling_enabled = 1; } @@ -2071,12 +2075,12 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) if (internals->link_status_polling_enabled) { rte_eal_alarm_set( internals->link_status_polling_interval_ms * 1000, - bond_ethdev_slave_link_status_change_monitor, + bond_ethdev_member_link_status_change_monitor, (void *)&rte_eth_devices[internals->port_id]); } - /* Update all slave devices MACs*/ - if (mac_address_slaves_update(eth_dev) != 0) + /* Update all member devices MACs*/ + if (mac_address_members_update(eth_dev) != 0) goto out_err; if (internals->user_defined_primary_port) @@ -2089,6 +2093,11 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) internals->mode == BONDING_MODE_ALB) bond_tlb_enable(internals); + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) + eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; out_err: @@ -2132,8 +2141,8 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev) bond_mode_8023ad_stop(eth_dev); /* Discard all messages to/from mode 4 state machines */ - for (i = 0; i < internals->active_slave_count; i++) { - port = &bond_mode_8023ad_ports[internals->active_slaves[i]]; + for (i = 0; i < internals->active_member_count; i++) { + port = &bond_mode_8023ad_ports[internals->active_members[i]]; RTE_ASSERT(port->rx_ring != NULL); while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT) @@ -2148,32 +2157,41 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev) if (internals->mode == BONDING_MODE_TLB || internals->mode == BONDING_MODE_ALB) { bond_tlb_disable(internals); - for (i = 0; i < internals->active_slave_count; i++) - tlb_last_obytets[internals->active_slaves[i]] = 0; + for (i = 0; i < internals->active_member_count; i++) + tlb_last_obytets[internals->active_members[i]] = 0; } eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; eth_dev->data->dev_started = 0; + if (internals->link_status_polling_enabled) { + rte_eal_alarm_cancel(bond_ethdev_member_link_status_change_monitor, + (void *)&rte_eth_devices[internals->port_id]); + } internals->link_status_polling_enabled = 0; - for (i = 0; i < internals->slave_count; i++) { - uint16_t slave_id = internals->slaves[i].port_id; + for (i = 0; i < internals->member_count; i++) { + uint16_t member_id = internals->members[i].port_id; - internals->slaves[i].last_link_status = 0; - ret = rte_eth_dev_stop(slave_id); + internals->members[i].last_link_status = 0; + ret = rte_eth_dev_stop(member_id); if (ret != 0) { RTE_BOND_LOG(ERR, "Failed to stop device on port %u", - slave_id); + member_id); return ret; } - /* active slaves need to be deactivated. */ - if (find_slave_by_id(internals->active_slaves, - internals->active_slave_count, slave_id) != - internals->active_slave_count) - deactivate_slave(eth_dev, slave_id); + /* active members need to be deactivated. 
*/ + if (find_member_by_id(internals->active_members, + internals->active_member_count, member_id) != + internals->active_member_count) + deactivate_member(eth_dev, member_id); } + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) + eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } @@ -2188,8 +2206,8 @@ bond_ethdev_cfg_cleanup(struct rte_eth_dev *dev, bool remove) /* Flush flows in all back-end devices before removing them */ bond_flow_ops.flush(dev, &ferror); - while (internals->slave_count != skipped) { - uint16_t port_id = internals->slaves[skipped].port_id; + while (internals->member_count != skipped) { + uint16_t port_id = internals->members[skipped].port_id; int ret; ret = rte_eth_dev_stop(port_id); @@ -2203,9 +2221,9 @@ bond_ethdev_cfg_cleanup(struct rte_eth_dev *dev, bool remove) continue; } - if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) { + if (rte_eth_bond_member_remove(bond_port_id, port_id) != 0) { RTE_BOND_LOG(ERR, - "Failed to remove port %d from bonded device %s", + "Failed to remove port %d from bonding device %s", port_id, dev->device->name); skipped++; } @@ -2220,7 +2238,7 @@ bond_ethdev_close(struct rte_eth_dev *dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name); + RTE_BOND_LOG(INFO, "Closing bonding device %s", dev->device->name); bond_ethdev_cfg_cleanup(dev, true); @@ -2246,7 +2264,7 @@ static int bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct bond_dev_private *internals = dev->data->dev_private; - struct bond_slave_details slave; + struct bond_member_details member; int ret; uint16_t max_nb_rx_queues = UINT16_MAX; @@ -2258,32 +2276,32 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) internals->candidate_max_rx_pktlen : RTE_ETHER_MAX_JUMBO_FRAME_LEN; - /* Max number of tx/rx queues that the bonded device can support is the - * minimum values of the bonded slaves, as all slaves must be capable + /* Max number of tx/rx queues that the bonding device can support is the + * minimum values of the bonding members, as all members must be capable * of supporting the same number of tx/rx queues. 
*/ - if (internals->slave_count > 0) { - struct rte_eth_dev_info slave_info; + if (internals->member_count > 0) { + struct rte_eth_dev_info member_info; uint16_t idx; - for (idx = 0; idx < internals->slave_count; idx++) { - slave = internals->slaves[idx]; - ret = rte_eth_dev_info_get(slave.port_id, &slave_info); + for (idx = 0; idx < internals->member_count; idx++) { + member = internals->members[idx]; + ret = rte_eth_dev_info_get(member.port_id, &member_info); if (ret != 0) { RTE_BOND_LOG(ERR, "%s: Error during getting device (port %u) info: %s\n", __func__, - slave.port_id, + member.port_id, strerror(-ret)); return ret; } - if (slave_info.max_rx_queues < max_nb_rx_queues) - max_nb_rx_queues = slave_info.max_rx_queues; + if (member_info.max_rx_queues < max_nb_rx_queues) + max_nb_rx_queues = member_info.max_rx_queues; - if (slave_info.max_tx_queues < max_nb_tx_queues) - max_nb_tx_queues = slave_info.max_tx_queues; + if (member_info.max_tx_queues < max_nb_tx_queues) + max_nb_tx_queues = member_info.max_tx_queues; } } @@ -2332,7 +2350,7 @@ bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) uint16_t i; struct bond_dev_private *internals = dev->data->dev_private; - /* don't do this while a slave is being added */ + /* don't do this while a member is being added */ rte_spinlock_lock(&internals->lock); if (on) @@ -2340,13 +2358,13 @@ bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) else rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id); - for (i = 0; i < internals->slave_count; i++) { - uint16_t port_id = internals->slaves[i].port_id; + for (i = 0; i < internals->member_count; i++) { + uint16_t port_id = internals->members[i].port_id; res = rte_eth_dev_vlan_filter(port_id, vlan_id, on); if (res == ENOTSUP) RTE_BOND_LOG(WARNING, - "Setting VLAN filter on slave port %u not supported.", + "Setting VLAN filter on member port %u not supported.", port_id); } @@ -2424,59 +2442,59 @@ bond_ethdev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id) } static void -bond_ethdev_slave_link_status_change_monitor(void *cb_arg) +bond_ethdev_member_link_status_change_monitor(void *cb_arg) { - struct rte_eth_dev *bonded_ethdev, *slave_ethdev; + struct rte_eth_dev *bonding_ethdev, *member_ethdev; struct bond_dev_private *internals; - /* Default value for polling slave found is true as we don't want to + /* Default value for polling member found is true as we don't want to * disable the polling thread if we cannot get the lock */ - int i, polling_slave_found = 1; + int i, polling_member_found = 1; if (cb_arg == NULL) return; - bonded_ethdev = cb_arg; - internals = bonded_ethdev->data->dev_private; + bonding_ethdev = cb_arg; + internals = bonding_ethdev->data->dev_private; - if (!bonded_ethdev->data->dev_started || + if (!bonding_ethdev->data->dev_started || !internals->link_status_polling_enabled) return; - /* If device is currently being configured then don't check slaves link + /* If device is currently being configured then don't check members link * status, wait until next period */ if (rte_spinlock_trylock(&internals->lock)) { - if (internals->slave_count > 0) - polling_slave_found = 0; + if (internals->member_count > 0) + polling_member_found = 0; - for (i = 0; i < internals->slave_count; i++) { - if (!internals->slaves[i].link_status_poll_enabled) + for (i = 0; i < internals->member_count; i++) { + if (!internals->members[i].link_status_poll_enabled) continue; - slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id]; - 
polling_slave_found = 1; + member_ethdev = &rte_eth_devices[internals->members[i].port_id]; + polling_member_found = 1; - /* Update slave link status */ - (*slave_ethdev->dev_ops->link_update)(slave_ethdev, - internals->slaves[i].link_status_wait_to_complete); + /* Update member link status */ + (*member_ethdev->dev_ops->link_update)(member_ethdev, + internals->members[i].link_status_wait_to_complete); /* if link status has changed since last checked then call lsc * event callback */ - if (slave_ethdev->data->dev_link.link_status != - internals->slaves[i].last_link_status) { - bond_ethdev_lsc_event_callback(internals->slaves[i].port_id, + if (member_ethdev->data->dev_link.link_status != + internals->members[i].last_link_status) { + bond_ethdev_lsc_event_callback(internals->members[i].port_id, RTE_ETH_EVENT_INTR_LSC, - &bonded_ethdev->data->port_id, + &bonding_ethdev->data->port_id, NULL); } } rte_spinlock_unlock(&internals->lock); } - if (polling_slave_found) - /* Set alarm to continue monitoring link status of slave ethdev's */ + if (polling_member_found) + /* Set alarm to continue monitoring link status of member ethdev's */ rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000, - bond_ethdev_slave_link_status_change_monitor, cb_arg); + bond_ethdev_member_link_status_change_monitor, cb_arg); } static int @@ -2485,7 +2503,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete) int (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link); struct bond_dev_private *bond_ctx; - struct rte_eth_link slave_link; + struct rte_eth_link member_link; bool one_link_update_succeeded; uint32_t idx; @@ -2496,7 +2514,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete) ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE; if (ethdev->data->dev_started == 0 || - bond_ctx->active_slave_count == 0) { + bond_ctx->active_member_count == 0) { ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; return 0; } @@ -2512,51 +2530,51 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete) case BONDING_MODE_BROADCAST: /** * Setting link speed to UINT32_MAX to ensure we pick up the - * value of the first active slave + * value of the first active member */ ethdev->data->dev_link.link_speed = UINT32_MAX; /** - * link speed is minimum value of all the slaves link speed as - * packet loss will occur on this slave if transmission at rates + * link speed is minimum value of all the members link speed as + * packet loss will occur on this member if transmission at rates * greater than this are attempted */ - for (idx = 0; idx < bond_ctx->active_slave_count; idx++) { - ret = link_update(bond_ctx->active_slaves[idx], - &slave_link); + for (idx = 0; idx < bond_ctx->active_member_count; idx++) { + ret = link_update(bond_ctx->active_members[idx], + &member_link); if (ret < 0) { ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE; RTE_BOND_LOG(ERR, - "Slave (port %u) link get failed: %s", - bond_ctx->active_slaves[idx], + "Member (port %u) link get failed: %s", + bond_ctx->active_members[idx], rte_strerror(-ret)); return 0; } - if (slave_link.link_speed < + if (member_link.link_speed < ethdev->data->dev_link.link_speed) ethdev->data->dev_link.link_speed = - slave_link.link_speed; + member_link.link_speed; } break; case BONDING_MODE_ACTIVE_BACKUP: - /* Current primary slave */ - ret = link_update(bond_ctx->current_primary_port, &slave_link); + /* Current primary member */ + ret = link_update(bond_ctx->current_primary_port, 
&member_link); if (ret < 0) { - RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s", + RTE_BOND_LOG(ERR, "Member (port %u) link get failed: %s", bond_ctx->current_primary_port, rte_strerror(-ret)); return 0; } - ethdev->data->dev_link.link_speed = slave_link.link_speed; + ethdev->data->dev_link.link_speed = member_link.link_speed; break; case BONDING_MODE_8023AD: ethdev->data->dev_link.link_autoneg = - bond_ctx->mode4.slave_link.link_autoneg; + bond_ctx->mode4.member_link.link_autoneg; ethdev->data->dev_link.link_duplex = - bond_ctx->mode4.slave_link.link_duplex; + bond_ctx->mode4.member_link.link_duplex; /* fall through */ /* to update link speed */ case BONDING_MODE_ROUND_ROBIN: @@ -2566,29 +2584,29 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete) default: /** * In theses mode the maximum theoretical link speed is the sum - * of all the slaves + * of all the members */ ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE; one_link_update_succeeded = false; - for (idx = 0; idx < bond_ctx->active_slave_count; idx++) { - ret = link_update(bond_ctx->active_slaves[idx], - &slave_link); + for (idx = 0; idx < bond_ctx->active_member_count; idx++) { + ret = link_update(bond_ctx->active_members[idx], + &member_link); if (ret < 0) { RTE_BOND_LOG(ERR, - "Slave (port %u) link get failed: %s", - bond_ctx->active_slaves[idx], + "Member (port %u) link get failed: %s", + bond_ctx->active_members[idx], rte_strerror(-ret)); continue; } one_link_update_succeeded = true; ethdev->data->dev_link.link_speed += - slave_link.link_speed; + member_link.link_speed; } if (!one_link_update_succeeded) { - RTE_BOND_LOG(ERR, "All slaves link get failed"); + RTE_BOND_LOG(ERR, "All members link get failed"); return 0; } } @@ -2602,27 +2620,27 @@ static int bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { struct bond_dev_private *internals = dev->data->dev_private; - struct rte_eth_stats slave_stats; + struct rte_eth_stats member_stats; int i, j; - for (i = 0; i < internals->slave_count; i++) { - rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats); + for (i = 0; i < internals->member_count; i++) { + rte_eth_stats_get(internals->members[i].port_id, &member_stats); - stats->ipackets += slave_stats.ipackets; - stats->opackets += slave_stats.opackets; - stats->ibytes += slave_stats.ibytes; - stats->obytes += slave_stats.obytes; - stats->imissed += slave_stats.imissed; - stats->ierrors += slave_stats.ierrors; - stats->oerrors += slave_stats.oerrors; - stats->rx_nombuf += slave_stats.rx_nombuf; + stats->ipackets += member_stats.ipackets; + stats->opackets += member_stats.opackets; + stats->ibytes += member_stats.ibytes; + stats->obytes += member_stats.obytes; + stats->imissed += member_stats.imissed; + stats->ierrors += member_stats.ierrors; + stats->oerrors += member_stats.oerrors; + stats->rx_nombuf += member_stats.rx_nombuf; for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { - stats->q_ipackets[j] += slave_stats.q_ipackets[j]; - stats->q_opackets[j] += slave_stats.q_opackets[j]; - stats->q_ibytes[j] += slave_stats.q_ibytes[j]; - stats->q_obytes[j] += slave_stats.q_obytes[j]; - stats->q_errors[j] += slave_stats.q_errors[j]; + stats->q_ipackets[j] += member_stats.q_ipackets[j]; + stats->q_opackets[j] += member_stats.q_opackets[j]; + stats->q_ibytes[j] += member_stats.q_ibytes[j]; + stats->q_obytes[j] += member_stats.q_obytes[j]; + stats->q_errors[j] += member_stats.q_errors[j]; } } @@ -2638,8 +2656,8 @@ bond_ethdev_stats_reset(struct rte_eth_dev *dev) 
int err; int ret; - for (i = 0, err = 0; i < internals->slave_count; i++) { - ret = rte_eth_stats_reset(internals->slaves[i].port_id); + for (i = 0, err = 0; i < internals->member_count; i++) { + ret = rte_eth_stats_reset(internals->members[i].port_id); if (ret != 0) err = ret; } @@ -2656,15 +2674,15 @@ bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev) uint16_t port_id; switch (internals->mode) { - /* Promiscuous mode is propagated to all slaves */ + /* Promiscuous mode is propagated to all members */ case BONDING_MODE_ROUND_ROBIN: case BONDING_MODE_BALANCE: case BONDING_MODE_BROADCAST: case BONDING_MODE_8023AD: { - unsigned int slave_ok = 0; + unsigned int member_ok = 0; - for (i = 0; i < internals->slave_count; i++) { - port_id = internals->slaves[i].port_id; + for (i = 0; i < internals->member_count; i++) { + port_id = internals->members[i].port_id; ret = rte_eth_promiscuous_enable(port_id); if (ret != 0) @@ -2672,23 +2690,23 @@ bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev) "Failed to enable promiscuous mode for port %u: %s", port_id, rte_strerror(-ret)); else - slave_ok++; + member_ok++; } /* * Report success if operation is successful on at least - * on one slave. Otherwise return last error code. + * on one member. Otherwise return last error code. */ - if (slave_ok > 0) + if (member_ok > 0) ret = 0; break; } - /* Promiscuous mode is propagated only to primary slave */ + /* Promiscuous mode is propagated only to primary member */ case BONDING_MODE_ACTIVE_BACKUP: case BONDING_MODE_TLB: case BONDING_MODE_ALB: default: /* Do not touch promisc when there cannot be primary ports */ - if (internals->slave_count == 0) + if (internals->member_count == 0) break; port_id = internals->current_primary_port; ret = rte_eth_promiscuous_enable(port_id); @@ -2710,20 +2728,20 @@ bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev) uint16_t port_id; switch (internals->mode) { - /* Promiscuous mode is propagated to all slaves */ + /* Promiscuous mode is propagated to all members */ case BONDING_MODE_ROUND_ROBIN: case BONDING_MODE_BALANCE: case BONDING_MODE_BROADCAST: case BONDING_MODE_8023AD: { - unsigned int slave_ok = 0; + unsigned int member_ok = 0; - for (i = 0; i < internals->slave_count; i++) { - port_id = internals->slaves[i].port_id; + for (i = 0; i < internals->member_count; i++) { + port_id = internals->members[i].port_id; if (internals->mode == BONDING_MODE_8023AD && bond_mode_8023ad_ports[port_id].forced_rx_flags == BOND_8023AD_FORCED_PROMISC) { - slave_ok++; + member_ok++; continue; } ret = rte_eth_promiscuous_disable(port_id); @@ -2732,23 +2750,23 @@ bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev) "Failed to disable promiscuous mode for port %u: %s", port_id, rte_strerror(-ret)); else - slave_ok++; + member_ok++; } /* * Report success if operation is successful on at least - * on one slave. Otherwise return last error code. + * on one member. Otherwise return last error code. 
*/ - if (slave_ok > 0) + if (member_ok > 0) ret = 0; break; } - /* Promiscuous mode is propagated only to primary slave */ + /* Promiscuous mode is propagated only to primary member */ case BONDING_MODE_ACTIVE_BACKUP: case BONDING_MODE_TLB: case BONDING_MODE_ALB: default: /* Do not touch promisc when there cannot be primary ports */ - if (internals->slave_count == 0) + if (internals->member_count == 0) break; port_id = internals->current_primary_port; ret = rte_eth_promiscuous_disable(port_id); @@ -2772,7 +2790,7 @@ bond_ethdev_promiscuous_update(struct rte_eth_dev *dev) case BONDING_MODE_BALANCE: case BONDING_MODE_BROADCAST: case BONDING_MODE_8023AD: - /* As promiscuous mode is propagated to all slaves for these + /* As promiscuous mode is propagated to all members for these * mode, no need to update for bonding device. */ break; @@ -2780,9 +2798,9 @@ bond_ethdev_promiscuous_update(struct rte_eth_dev *dev) case BONDING_MODE_TLB: case BONDING_MODE_ALB: default: - /* As promiscuous mode is propagated only to primary slave + /* As promiscuous mode is propagated only to primary member * for these mode. When active/standby switchover, promiscuous - * mode should be set to new primary slave according to bonding + * mode should be set to new primary member according to bonding * device. */ if (rte_eth_promiscuous_get(internals->port_id) == 1) @@ -2803,15 +2821,15 @@ bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev) uint16_t port_id; switch (internals->mode) { - /* allmulti mode is propagated to all slaves */ + /* allmulti mode is propagated to all members */ case BONDING_MODE_ROUND_ROBIN: case BONDING_MODE_BALANCE: case BONDING_MODE_BROADCAST: case BONDING_MODE_8023AD: { - unsigned int slave_ok = 0; + unsigned int member_ok = 0; - for (i = 0; i < internals->slave_count; i++) { - port_id = internals->slaves[i].port_id; + for (i = 0; i < internals->member_count; i++) { + port_id = internals->members[i].port_id; ret = rte_eth_allmulticast_enable(port_id); if (ret != 0) @@ -2819,23 +2837,23 @@ bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev) "Failed to enable allmulti mode for port %u: %s", port_id, rte_strerror(-ret)); else - slave_ok++; + member_ok++; } /* * Report success if operation is successful on at least - * on one slave. Otherwise return last error code. + * on one member. Otherwise return last error code. 
*/ - if (slave_ok > 0) + if (member_ok > 0) ret = 0; break; } - /* allmulti mode is propagated only to primary slave */ + /* allmulti mode is propagated only to primary member */ case BONDING_MODE_ACTIVE_BACKUP: case BONDING_MODE_TLB: case BONDING_MODE_ALB: default: /* Do not touch allmulti when there cannot be primary ports */ - if (internals->slave_count == 0) + if (internals->member_count == 0) break; port_id = internals->current_primary_port; ret = rte_eth_allmulticast_enable(port_id); @@ -2857,15 +2875,15 @@ bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev) uint16_t port_id; switch (internals->mode) { - /* allmulti mode is propagated to all slaves */ + /* allmulti mode is propagated to all members */ case BONDING_MODE_ROUND_ROBIN: case BONDING_MODE_BALANCE: case BONDING_MODE_BROADCAST: case BONDING_MODE_8023AD: { - unsigned int slave_ok = 0; + unsigned int member_ok = 0; - for (i = 0; i < internals->slave_count; i++) { - uint16_t port_id = internals->slaves[i].port_id; + for (i = 0; i < internals->member_count; i++) { + uint16_t port_id = internals->members[i].port_id; if (internals->mode == BONDING_MODE_8023AD && bond_mode_8023ad_ports[port_id].forced_rx_flags == @@ -2878,23 +2896,23 @@ bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev) "Failed to disable allmulti mode for port %u: %s", port_id, rte_strerror(-ret)); else - slave_ok++; + member_ok++; } /* * Report success if operation is successful on at least - * on one slave. Otherwise return last error code. + * on one member. Otherwise return last error code. */ - if (slave_ok > 0) + if (member_ok > 0) ret = 0; break; } - /* allmulti mode is propagated only to primary slave */ + /* allmulti mode is propagated only to primary member */ case BONDING_MODE_ACTIVE_BACKUP: case BONDING_MODE_TLB: case BONDING_MODE_ALB: default: /* Do not touch allmulti when there cannot be primary ports */ - if (internals->slave_count == 0) + if (internals->member_count == 0) break; port_id = internals->current_primary_port; ret = rte_eth_allmulticast_disable(port_id); @@ -2918,7 +2936,7 @@ bond_ethdev_allmulticast_update(struct rte_eth_dev *dev) case BONDING_MODE_BALANCE: case BONDING_MODE_BROADCAST: case BONDING_MODE_8023AD: - /* As allmulticast mode is propagated to all slaves for these + /* As allmulticast mode is propagated to all members for these * mode, no need to update for bonding device. */ break; @@ -2926,9 +2944,9 @@ bond_ethdev_allmulticast_update(struct rte_eth_dev *dev) case BONDING_MODE_TLB: case BONDING_MODE_ALB: default: - /* As allmulticast mode is propagated only to primary slave + /* As allmulticast mode is propagated only to primary member * for these mode. When active/standby switchover, allmulticast - * mode should be set to new primary slave according to bonding + * mode should be set to new primary member according to bonding * device. 
*/ if (rte_eth_allmulticast_get(internals->port_id) == 1) @@ -2954,87 +2972,87 @@ int bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, void *param, void *ret_param __rte_unused) { - struct rte_eth_dev *bonded_eth_dev; + struct rte_eth_dev *bonding_eth_dev; struct bond_dev_private *internals; struct rte_eth_link link; int rc = -1; int ret; uint8_t lsc_flag = 0; - int valid_slave = 0; - uint16_t active_pos, slave_idx; + int valid_member = 0; + uint16_t active_pos, member_idx; uint16_t i; if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL) return rc; - bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param]; + bonding_eth_dev = &rte_eth_devices[*(uint16_t *)param]; - if (check_for_bonded_ethdev(bonded_eth_dev)) + if (check_for_bonding_ethdev(bonding_eth_dev)) return rc; - internals = bonded_eth_dev->data->dev_private; + internals = bonding_eth_dev->data->dev_private; /* If the device isn't started don't handle interrupts */ - if (!bonded_eth_dev->data->dev_started) + if (!bonding_eth_dev->data->dev_started) return rc; - /* verify that port_id is a valid slave of bonded port */ - for (i = 0; i < internals->slave_count; i++) { - if (internals->slaves[i].port_id == port_id) { - valid_slave = 1; - slave_idx = i; + /* verify that port_id is a valid member of bonding port */ + for (i = 0; i < internals->member_count; i++) { + if (internals->members[i].port_id == port_id) { + valid_member = 1; + member_idx = i; break; } } - if (!valid_slave) + if (!valid_member) return rc; /* Synchronize lsc callback parallel calls either by real link event - * from the slaves PMDs or by the bonding PMD itself. + * from the members PMDs or by the bonding PMD itself. */ rte_spinlock_lock(&internals->lsc_lock); /* Search for port in active port list */ - active_pos = find_slave_by_id(internals->active_slaves, - internals->active_slave_count, port_id); + active_pos = find_member_by_id(internals->active_members, + internals->active_member_count, port_id); ret = rte_eth_link_get_nowait(port_id, &link); if (ret < 0) - RTE_BOND_LOG(ERR, "Slave (port %u) link get failed", port_id); + RTE_BOND_LOG(ERR, "Member (port %u) link get failed", port_id); if (ret == 0 && link.link_status) { - if (active_pos < internals->active_slave_count) + if (active_pos < internals->active_member_count) goto link_update; - /* check link state properties if bonded link is up*/ - if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) { - if (link_properties_valid(bonded_eth_dev, &link) != 0) + /* check link state properties if bonding link is up*/ + if (bonding_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) { + if (link_properties_valid(bonding_eth_dev, &link) != 0) RTE_BOND_LOG(ERR, "Invalid link properties " - "for slave %d in bonding mode %d", + "for member %d in bonding mode %d", port_id, internals->mode); } else { - /* inherit slave link properties */ - link_properties_set(bonded_eth_dev, &link); + /* inherit member link properties */ + link_properties_set(bonding_eth_dev, &link); } - /* If no active slave ports then set this port to be + /* If no active member ports then set this port to be * the primary port. 
*/ - if (internals->active_slave_count < 1) { - /* If first active slave, then change link status */ - bonded_eth_dev->data->dev_link.link_status = + if (internals->active_member_count < 1) { + /* If first active member, then change link status */ + bonding_eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP; internals->current_primary_port = port_id; lsc_flag = 1; - mac_address_slaves_update(bonded_eth_dev); - bond_ethdev_promiscuous_update(bonded_eth_dev); - bond_ethdev_allmulticast_update(bonded_eth_dev); + mac_address_members_update(bonding_eth_dev); + bond_ethdev_promiscuous_update(bonding_eth_dev); + bond_ethdev_allmulticast_update(bonding_eth_dev); } - activate_slave(bonded_eth_dev, port_id); + activate_member(bonding_eth_dev, port_id); /* If the user has defined the primary port then default to * using it. @@ -3043,51 +3061,51 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, internals->primary_port == port_id) bond_ethdev_primary_set(internals, port_id); } else { - if (active_pos == internals->active_slave_count) + if (active_pos == internals->active_member_count) goto link_update; - /* Remove from active slave list */ - deactivate_slave(bonded_eth_dev, port_id); + /* Remove from active member list */ + deactivate_member(bonding_eth_dev, port_id); - if (internals->active_slave_count < 1) + if (internals->active_member_count < 1) lsc_flag = 1; - /* Update primary id, take first active slave from list or if none + /* Update primary id, take first active member from list or if none * available set to -1 */ if (port_id == internals->current_primary_port) { - if (internals->active_slave_count > 0) + if (internals->active_member_count > 0) bond_ethdev_primary_set(internals, - internals->active_slaves[0]); + internals->active_members[0]); else internals->current_primary_port = internals->primary_port; - mac_address_slaves_update(bonded_eth_dev); - bond_ethdev_promiscuous_update(bonded_eth_dev); - bond_ethdev_allmulticast_update(bonded_eth_dev); + mac_address_members_update(bonding_eth_dev); + bond_ethdev_promiscuous_update(bonding_eth_dev); + bond_ethdev_allmulticast_update(bonding_eth_dev); } } link_update: /** - * Update bonded device link properties after any change to active - * slaves + * Update bonding device link properties after any change to active + * members */ - bond_ethdev_link_update(bonded_eth_dev, 0); - internals->slaves[slave_idx].last_link_status = link.link_status; + bond_ethdev_link_update(bonding_eth_dev, 0); + internals->members[member_idx].last_link_status = link.link_status; if (lsc_flag) { /* Cancel any possible outstanding interrupts if delays are enabled */ if (internals->link_up_delay_ms > 0 || internals->link_down_delay_ms > 0) rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation, - bonded_eth_dev); + bonding_eth_dev); - if (bonded_eth_dev->data->dev_link.link_status) { + if (bonding_eth_dev->data->dev_link.link_status) { if (internals->link_up_delay_ms > 0) rte_eal_alarm_set(internals->link_up_delay_ms * 1000, bond_ethdev_delayed_lsc_propagation, - (void *)bonded_eth_dev); + (void *)bonding_eth_dev); else - rte_eth_dev_callback_process(bonded_eth_dev, + rte_eth_dev_callback_process(bonding_eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); @@ -3095,9 +3113,9 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, if (internals->link_down_delay_ms > 0) rte_eal_alarm_set(internals->link_down_delay_ms * 1000, bond_ethdev_delayed_lsc_propagation, - (void *)bonded_eth_dev); + (void *)bonding_eth_dev); else - 
rte_eth_dev_callback_process(bonded_eth_dev, + rte_eth_dev_callback_process(bonding_eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); } @@ -3114,7 +3132,7 @@ bond_ethdev_rss_reta_update(struct rte_eth_dev *dev, { unsigned i, j; int result = 0; - int slave_reta_size; + int member_reta_size; unsigned reta_count; struct bond_dev_private *internals = dev->data->dev_private; @@ -3137,11 +3155,11 @@ bond_ethdev_rss_reta_update(struct rte_eth_dev *dev, memcpy(&internals->reta_conf[i], &internals->reta_conf[0], sizeof(internals->reta_conf[0]) * reta_count); - /* Propagate RETA over slaves */ - for (i = 0; i < internals->slave_count; i++) { - slave_reta_size = internals->slaves[i].reta_size; - result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id, - &internals->reta_conf[0], slave_reta_size); + /* Propagate RETA over members */ + for (i = 0; i < internals->member_count; i++) { + member_reta_size = internals->members[i].reta_size; + result = rte_eth_dev_rss_reta_update(internals->members[i].port_id, + &internals->reta_conf[0], member_reta_size); if (result < 0) return result; } @@ -3194,8 +3212,8 @@ bond_ethdev_rss_hash_update(struct rte_eth_dev *dev, bond_rss_conf.rss_key_len = internals->rss_key_len; } - for (i = 0; i < internals->slave_count; i++) { - result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id, + for (i = 0; i < internals->member_count; i++) { + result = rte_eth_dev_rss_hash_update(internals->members[i].port_id, &bond_rss_conf); if (result < 0) return result; @@ -3221,21 +3239,21 @@ bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev, static int bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { - struct rte_eth_dev *slave_eth_dev; + struct rte_eth_dev *member_eth_dev; struct bond_dev_private *internals = dev->data->dev_private; int ret, i; rte_spinlock_lock(&internals->lock); - for (i = 0; i < internals->slave_count; i++) { - slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id]; - if (*slave_eth_dev->dev_ops->mtu_set == NULL) { + for (i = 0; i < internals->member_count; i++) { + member_eth_dev = &rte_eth_devices[internals->members[i].port_id]; + if (*member_eth_dev->dev_ops->mtu_set == NULL) { rte_spinlock_unlock(&internals->lock); return -ENOTSUP; } } - for (i = 0; i < internals->slave_count; i++) { - ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu); + for (i = 0; i < internals->member_count; i++) { + ret = rte_eth_dev_set_mtu(internals->members[i].port_id, mtu); if (ret < 0) { rte_spinlock_unlock(&internals->lock); return ret; @@ -3271,29 +3289,29 @@ bond_ethdev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, __rte_unused uint32_t index, uint32_t vmdq) { - struct rte_eth_dev *slave_eth_dev; + struct rte_eth_dev *member_eth_dev; struct bond_dev_private *internals = dev->data->dev_private; int ret, i; rte_spinlock_lock(&internals->lock); - for (i = 0; i < internals->slave_count; i++) { - slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id]; - if (*slave_eth_dev->dev_ops->mac_addr_add == NULL || - *slave_eth_dev->dev_ops->mac_addr_remove == NULL) { + for (i = 0; i < internals->member_count; i++) { + member_eth_dev = &rte_eth_devices[internals->members[i].port_id]; + if (*member_eth_dev->dev_ops->mac_addr_add == NULL || + *member_eth_dev->dev_ops->mac_addr_remove == NULL) { ret = -ENOTSUP; goto end; } } - for (i = 0; i < internals->slave_count; i++) { - ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id, + for (i = 0; i < internals->member_count; i++) { + ret = 
rte_eth_dev_mac_addr_add(internals->members[i].port_id, mac_addr, vmdq); if (ret < 0) { /* rollback */ for (i--; i >= 0; i--) rte_eth_dev_mac_addr_remove( - internals->slaves[i].port_id, mac_addr); + internals->members[i].port_id, mac_addr); goto end; } } @@ -3307,22 +3325,22 @@ bond_ethdev_mac_addr_add(struct rte_eth_dev *dev, static void bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) { - struct rte_eth_dev *slave_eth_dev; + struct rte_eth_dev *member_eth_dev; struct bond_dev_private *internals = dev->data->dev_private; int i; rte_spinlock_lock(&internals->lock); - for (i = 0; i < internals->slave_count; i++) { - slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id]; - if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL) + for (i = 0; i < internals->member_count; i++) { + member_eth_dev = &rte_eth_devices[internals->members[i].port_id]; + if (*member_eth_dev->dev_ops->mac_addr_remove == NULL) goto end; } struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index]; - for (i = 0; i < internals->slave_count; i++) - rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id, + for (i = 0; i < internals->member_count; i++) + rte_eth_dev_mac_addr_remove(internals->members[i].port_id, mac_addr); end: @@ -3402,30 +3420,30 @@ dump_basic(const struct rte_eth_dev *dev, FILE *f) fprintf(f, "\n"); } - if (internals->slave_count > 0) { - fprintf(f, "\tSlaves (%u): [", internals->slave_count); - for (i = 0; i < internals->slave_count - 1; i++) - fprintf(f, "%u ", internals->slaves[i].port_id); + if (internals->member_count > 0) { + fprintf(f, "\tMembers (%u): [", internals->member_count); + for (i = 0; i < internals->member_count - 1; i++) + fprintf(f, "%u ", internals->members[i].port_id); - fprintf(f, "%u]\n", internals->slaves[internals->slave_count - 1].port_id); + fprintf(f, "%u]\n", internals->members[internals->member_count - 1].port_id); } else { - fprintf(f, "\tSlaves: []\n"); + fprintf(f, "\tMembers: []\n"); } - if (internals->active_slave_count > 0) { - fprintf(f, "\tActive Slaves (%u): [", internals->active_slave_count); - for (i = 0; i < internals->active_slave_count - 1; i++) - fprintf(f, "%u ", internals->active_slaves[i]); + if (internals->active_member_count > 0) { + fprintf(f, "\tActive Members (%u): [", internals->active_member_count); + for (i = 0; i < internals->active_member_count - 1; i++) + fprintf(f, "%u ", internals->active_members[i]); - fprintf(f, "%u]\n", internals->active_slaves[internals->active_slave_count - 1]); + fprintf(f, "%u]\n", internals->active_members[internals->active_member_count - 1]); } else { - fprintf(f, "\tActive Slaves: []\n"); + fprintf(f, "\tActive Members: []\n"); } if (internals->user_defined_primary_port) fprintf(f, "\tUser Defined Primary: [%u]\n", internals->primary_port); - if (internals->slave_count > 0) + if (internals->member_count > 0) fprintf(f, "\tCurrent Primary: [%u]\n", internals->current_primary_port); } @@ -3471,7 +3489,7 @@ dump_lacp_port_param(const struct port_params *params, FILE *f) } static void -dump_lacp_slave(const struct rte_eth_bond_8023ad_slave_info *info, FILE *f) +dump_lacp_member(const struct rte_eth_bond_8023ad_member_info *info, FILE *f) { char a_state[256] = { 0 }; char p_state[256] = { 0 }; @@ -3520,18 +3538,18 @@ dump_lacp_slave(const struct rte_eth_bond_8023ad_slave_info *info, FILE *f) static void dump_lacp(uint16_t port_id, FILE *f) { - struct rte_eth_bond_8023ad_slave_info slave_info; + struct rte_eth_bond_8023ad_member_info member_info; struct rte_eth_bond_8023ad_conf port_conf; 
- uint16_t slaves[RTE_MAX_ETHPORTS]; - int num_active_slaves; + uint16_t members[RTE_MAX_ETHPORTS]; + int num_active_members; int i, ret; fprintf(f, " - Lacp info:\n"); - num_active_slaves = rte_eth_bond_active_slaves_get(port_id, slaves, + num_active_members = rte_eth_bond_active_members_get(port_id, members, RTE_MAX_ETHPORTS); - if (num_active_slaves < 0) { - fprintf(f, "\tFailed to get active slave list for port %u\n", + if (num_active_members < 0) { + fprintf(f, "\tFailed to get active member list for port %u\n", port_id); return; } @@ -3539,22 +3557,22 @@ dump_lacp(uint16_t port_id, FILE *f) fprintf(f, "\tIEEE802.3 port: %u\n", port_id); ret = rte_eth_bond_8023ad_conf_get(port_id, &port_conf); if (ret) { - fprintf(f, "\tGet bonded device %u 8023ad config failed\n", + fprintf(f, "\tGet bonding device %u 8023ad config failed\n", port_id); return; } dump_lacp_conf(&port_conf, f); - for (i = 0; i < num_active_slaves; i++) { - ret = rte_eth_bond_8023ad_slave_info(port_id, slaves[i], - &slave_info); + for (i = 0; i < num_active_members; i++) { + ret = rte_eth_bond_8023ad_member_info(port_id, members[i], + &member_info); if (ret) { - fprintf(f, "\tGet slave device %u 8023ad info failed\n", - slaves[i]); + fprintf(f, "\tGet member device %u 8023ad info failed\n", + members[i]); return; } - fprintf(f, "\tSlave Port: %u\n", slaves[i]); - dump_lacp_slave(&slave_info, f); + fprintf(f, "\tMember Port: %u\n", members[i]); + dump_lacp_member(&member_info, f); } } @@ -3655,8 +3673,8 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) internals->link_down_delay_ms = 0; internals->link_up_delay_ms = 0; - internals->slave_count = 0; - internals->active_slave_count = 0; + internals->member_count = 0; + internals->active_member_count = 0; internals->rx_offload_capa = 0; internals->tx_offload_capa = 0; internals->rx_queue_offload_capa = 0; @@ -3684,8 +3702,8 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) internals->rx_desc_lim.nb_align = 1; internals->tx_desc_lim.nb_align = 1; - memset(internals->active_slaves, 0, sizeof(internals->active_slaves)); - memset(internals->slaves, 0, sizeof(internals->slaves)); + memset(internals->active_members, 0, sizeof(internals->active_members)); + memset(internals->members, 0, sizeof(internals->members)); TAILQ_INIT(&internals->flow_list); internals->flow_isolated_valid = 0; @@ -3693,7 +3711,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) /* Set mode 4 default configuration */ bond_mode_8023ad_setup(eth_dev, NULL); if (bond_ethdev_mode_set(eth_dev, mode)) { - RTE_BOND_LOG(ERR, "Failed to set bonded device %u mode to %u", + RTE_BOND_LOG(ERR, "Failed to set bonding device %u mode to %u", eth_dev->data->port_id, mode); goto err; } @@ -3704,7 +3722,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) RTE_CACHE_LINE_SIZE); if (internals->vlan_filter_bmpmem == NULL) { RTE_BOND_LOG(ERR, - "Failed to allocate vlan bitmap for bonded device %u", + "Failed to allocate vlan bitmap for bonding device %u", eth_dev->data->port_id); goto err; } @@ -3713,7 +3731,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) internals->vlan_filter_bmpmem, vlan_filter_bmp_size); if (internals->vlan_filter_bmp == NULL) { RTE_BOND_LOG(ERR, - "Failed to init vlan bitmap for bonded device %u", + "Failed to init vlan bitmap for bonding device %u", eth_dev->data->port_id); rte_free(internals->vlan_filter_bmpmem); goto err; @@ -3770,14 +3788,14 @@ bond_probe(struct rte_vdev_device *dev) /* Parse link bonding mode */ if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 
1) { if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG, - &bond_ethdev_parse_slave_mode_kvarg, + &bond_ethdev_parse_member_mode_kvarg, &bonding_mode) != 0) { - RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s", + RTE_BOND_LOG(ERR, "Invalid mode for bonding device %s", name); goto parse_error; } } else { - RTE_BOND_LOG(ERR, "Mode must be specified only once for bonded " + RTE_BOND_LOG(ERR, "Mode must be specified only once for bonding " "device %s", name); goto parse_error; } @@ -3789,12 +3807,12 @@ bond_probe(struct rte_vdev_device *dev) &bond_ethdev_parse_socket_id_kvarg, &socket_id) != 0) { RTE_BOND_LOG(ERR, "Invalid socket Id specified for " - "bonded device %s", name); + "bonding device %s", name); goto parse_error; } } else if (arg_count > 1) { RTE_BOND_LOG(ERR, "Socket Id can be specified only once for " - "bonded device %s", name); + "bonding device %s", name); goto parse_error; } else { socket_id = rte_socket_id(); @@ -3815,10 +3833,10 @@ bond_probe(struct rte_vdev_device *dev) if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) { if (rte_kvargs_process(kvlist, PMD_BOND_AGG_MODE_KVARG, - &bond_ethdev_parse_slave_agg_mode_kvarg, + &bond_ethdev_parse_member_agg_mode_kvarg, &agg_mode) != 0) { RTE_BOND_LOG(ERR, - "Failed to parse agg selection mode for bonded device %s", + "Failed to parse agg selection mode for bonding device %s", name); goto parse_error; } @@ -3830,8 +3848,8 @@ bond_probe(struct rte_vdev_device *dev) } rte_eth_dev_probing_finish(&rte_eth_devices[port_id]); - RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on " - "socket %d.", name, port_id, bonding_mode, socket_id); + RTE_BOND_LOG(INFO, "Create bonding device %s on port %d in mode %u on " + "socket %u.", name, port_id, bonding_mode, socket_id); return 0; parse_error: @@ -3865,7 +3883,7 @@ bond_remove(struct rte_vdev_device *dev) RTE_ASSERT(eth_dev->device == &dev->device); internals = eth_dev->data->dev_private; - if (internals->slave_count != 0) + if (internals->member_count != 0) return -EBUSY; if (eth_dev->data->dev_started == 1) { @@ -3877,7 +3895,7 @@ bond_remove(struct rte_vdev_device *dev) return ret; } -/* this part will resolve the slave portids after all the other pdev and vdev +/* this part will resolve the member portids after all the other pdev and vdev * have been allocated */ static int bond_ethdev_configure(struct rte_eth_dev *dev) @@ -3959,7 +3977,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev) if (link_speeds & RTE_ETH_LINK_SPEED_FIXED) { if ((link_speeds & (internals->speed_capa & ~RTE_ETH_LINK_SPEED_FIXED)) == 0) { - RTE_BOND_LOG(ERR, "the fixed speed is not supported by all slave devices."); + RTE_BOND_LOG(ERR, "the fixed speed is not supported by all member devices."); return -EINVAL; } /* @@ -3976,7 +3994,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev) internals->max_rx_pktlen = internals->candidate_max_rx_pktlen; /* - * if no kvlist, it means that this bonded device has been created + * if no kvlist, it means that this bonding device has been created * through the bonding api. 
*/ if (!kvlist || internals->kvargs_processing_is_done) @@ -3984,14 +4002,14 @@ bond_ethdev_configure(struct rte_eth_dev *dev) internals->kvargs_processing_is_done = true; - /* Parse MAC address for bonded device */ + /* Parse MAC address for bonding device */ arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG); if (arg_count == 1) { struct rte_ether_addr bond_mac; if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG, &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) { - RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s", + RTE_BOND_LOG(INFO, "Invalid mac address for bonding device %s", name); return -1; } @@ -3999,13 +4017,13 @@ bond_ethdev_configure(struct rte_eth_dev *dev) /* Set MAC address */ if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) { RTE_BOND_LOG(ERR, - "Failed to set mac address on bonded device %s", + "Failed to set mac address on bonding device %s", name); return -1; } } else if (arg_count > 1) { RTE_BOND_LOG(ERR, - "MAC address can be specified only once for bonded device %s", + "MAC address can be specified only once for bonding device %s", name); return -1; } @@ -4019,7 +4037,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev) &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) != 0) { RTE_BOND_LOG(INFO, - "Invalid xmit policy specified for bonded device %s", + "Invalid xmit policy specified for bonding device %s", name); return -1; } @@ -4027,13 +4045,13 @@ bond_ethdev_configure(struct rte_eth_dev *dev) /* Set balance mode transmit policy*/ if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) { RTE_BOND_LOG(ERR, - "Failed to set balance xmit policy on bonded device %s", + "Failed to set balance xmit policy on bonding device %s", name); return -1; } } else if (arg_count > 1) { RTE_BOND_LOG(ERR, - "Transmit policy can be specified only once for bonded device %s", + "Transmit policy can be specified only once for bonding device %s", name); return -1; } @@ -4041,10 +4059,10 @@ bond_ethdev_configure(struct rte_eth_dev *dev) if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) { if (rte_kvargs_process(kvlist, PMD_BOND_AGG_MODE_KVARG, - &bond_ethdev_parse_slave_agg_mode_kvarg, + &bond_ethdev_parse_member_agg_mode_kvarg, &agg_mode) != 0) { RTE_BOND_LOG(ERR, - "Failed to parse agg selection mode for bonded device %s", + "Failed to parse agg selection mode for bonding device %s", name); } if (internals->mode == BONDING_MODE_8023AD) { @@ -4052,67 +4070,67 @@ bond_ethdev_configure(struct rte_eth_dev *dev) agg_mode); if (ret < 0) { RTE_BOND_LOG(ERR, - "Invalid args for agg selection set for bonded device %s", + "Invalid args for agg selection set for bonding device %s", name); return -1; } } } - /* Parse/add slave ports to bonded device */ - if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) { - struct bond_ethdev_slave_ports slave_ports; + /* Parse/add member ports to bonding device */ + if (rte_kvargs_count(kvlist, PMD_BOND_MEMBER_PORT_KVARG) > 0) { + struct bond_ethdev_member_ports member_ports; unsigned i; - memset(&slave_ports, 0, sizeof(slave_ports)); + memset(&member_ports, 0, sizeof(member_ports)); - if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG, - &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) { + if (rte_kvargs_process(kvlist, PMD_BOND_MEMBER_PORT_KVARG, + &bond_ethdev_parse_member_port_kvarg, &member_ports) != 0) { RTE_BOND_LOG(ERR, - "Failed to parse slave ports for bonded device %s", + "Failed to parse member ports for bonding device %s", name); return -1; } - for (i = 0; i < 
slave_ports.slave_count; i++) { - if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) { + for (i = 0; i < member_ports.member_count; i++) { + if (rte_eth_bond_member_add(port_id, member_ports.members[i]) != 0) { RTE_BOND_LOG(ERR, - "Failed to add port %d as slave to bonded device %s", - slave_ports.slaves[i], name); + "Failed to add port %d as member to bonding device %s", + member_ports.members[i], name); } } } else { - RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name); + RTE_BOND_LOG(INFO, "No members specified for bonding device %s", name); return -1; } - /* Parse/set primary slave port id*/ - arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG); + /* Parse/set primary member port id*/ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_MEMBER_KVARG); if (arg_count == 1) { - uint16_t primary_slave_port_id; + uint16_t primary_member_port_id; if (rte_kvargs_process(kvlist, - PMD_BOND_PRIMARY_SLAVE_KVARG, - &bond_ethdev_parse_primary_slave_port_id_kvarg, - &primary_slave_port_id) < 0) { + PMD_BOND_PRIMARY_MEMBER_KVARG, + &bond_ethdev_parse_primary_member_port_id_kvarg, + &primary_member_port_id) < 0) { RTE_BOND_LOG(INFO, - "Invalid primary slave port id specified for bonded device %s", + "Invalid primary member port id specified for bonding device %s", name); return -1; } /* Set balance mode transmit policy*/ - if (rte_eth_bond_primary_set(port_id, primary_slave_port_id) + if (rte_eth_bond_primary_set(port_id, primary_member_port_id) != 0) { RTE_BOND_LOG(ERR, - "Failed to set primary slave port %d on bonded device %s", - primary_slave_port_id, name); + "Failed to set primary member port %d on bonding device %s", + primary_member_port_id, name); return -1; } } else if (arg_count > 1) { RTE_BOND_LOG(INFO, - "Primary slave can be specified only once for bonded device %s", + "Primary member can be specified only once for bonding device %s", name); return -1; } @@ -4127,7 +4145,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev) &bond_ethdev_parse_time_ms_kvarg, &lsc_poll_interval_ms) < 0) { RTE_BOND_LOG(INFO, - "Invalid lsc polling interval value specified for bonded" + "Invalid lsc polling interval value specified for bonding" " device %s", name); return -1; } @@ -4135,13 +4153,13 @@ bond_ethdev_configure(struct rte_eth_dev *dev) if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms) != 0) { RTE_BOND_LOG(ERR, - "Failed to set lsc monitor polling interval (%u ms) on bonded device %s", + "Failed to set lsc monitor polling interval (%u ms) on bonding device %s", lsc_poll_interval_ms, name); return -1; } } else if (arg_count > 1) { RTE_BOND_LOG(INFO, - "LSC polling interval can be specified only once for bonded" + "LSC polling interval can be specified only once for bonding" " device %s", name); return -1; } @@ -4157,7 +4175,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev) &link_up_delay_ms) < 0) { RTE_BOND_LOG(INFO, "Invalid link up propagation delay value specified for" - " bonded device %s", name); + " bonding device %s", name); return -1; } @@ -4165,14 +4183,14 @@ bond_ethdev_configure(struct rte_eth_dev *dev) if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms) != 0) { RTE_BOND_LOG(ERR, - "Failed to set link up propagation delay (%u ms) on bonded" + "Failed to set link up propagation delay (%u ms) on bonding" " device %s", link_up_delay_ms, name); return -1; } } else if (arg_count > 1) { RTE_BOND_LOG(INFO, "Link up propagation delay can be specified only once for" - " bonded device %s", name); + " bonding device 
%s", name); return -1; } @@ -4187,7 +4205,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev) &link_down_delay_ms) < 0) { RTE_BOND_LOG(INFO, "Invalid link down propagation delay value specified for" - " bonded device %s", name); + " bonding device %s", name); return -1; } @@ -4195,26 +4213,26 @@ bond_ethdev_configure(struct rte_eth_dev *dev) if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms) != 0) { RTE_BOND_LOG(ERR, - "Failed to set link down propagation delay (%u ms) on bonded device %s", + "Failed to set link down propagation delay (%u ms) on bonding device %s", link_down_delay_ms, name); return -1; } } else if (arg_count > 1) { RTE_BOND_LOG(INFO, - "Link down propagation delay can be specified only once for bonded device %s", + "Link down propagation delay can be specified only once for bonding device %s", name); return -1; } - /* configure slaves so we can pass mtu setting */ - for (i = 0; i < internals->slave_count; i++) { - struct rte_eth_dev *slave_ethdev = - &(rte_eth_devices[internals->slaves[i].port_id]); - if (slave_configure(dev, slave_ethdev) != 0) { + /* configure members so we can pass mtu setting */ + for (i = 0; i < internals->member_count; i++) { + struct rte_eth_dev *member_ethdev = + &(rte_eth_devices[internals->members[i].port_id]); + if (member_configure(dev, member_ethdev) != 0) { RTE_BOND_LOG(ERR, - "bonded port (%d) failed to configure slave device (%d)", + "bonding port (%d) failed to configure member device (%d)", dev->data->port_id, - internals->slaves[i].port_id); + internals->members[i].port_id); return -1; } } @@ -4230,7 +4248,7 @@ RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv); RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond); RTE_PMD_REGISTER_PARAM_STRING(net_bonding, - "slave= " + "member= " "primary= " "mode=[0-6] " "xmit_policy=[l2 | l23 | l34] " diff --git a/drivers/net/bonding/version.map b/drivers/net/bonding/version.map index bd28ee78a5e..09ee21c55f7 100644 --- a/drivers/net/bonding/version.map +++ b/drivers/net/bonding/version.map @@ -12,8 +12,6 @@ DPDK_24 { rte_eth_bond_8023ad_ext_distrib_get; rte_eth_bond_8023ad_ext_slowtx; rte_eth_bond_8023ad_setup; - rte_eth_bond_8023ad_slave_info; - rte_eth_bond_active_slaves_get; rte_eth_bond_create; rte_eth_bond_free; rte_eth_bond_link_monitoring_set; @@ -23,11 +21,18 @@ DPDK_24 { rte_eth_bond_mode_set; rte_eth_bond_primary_get; rte_eth_bond_primary_set; - rte_eth_bond_slave_add; - rte_eth_bond_slave_remove; - rte_eth_bond_slaves_get; rte_eth_bond_xmit_policy_get; rte_eth_bond_xmit_policy_set; local: *; }; + +EXPERIMENTAL { + # added in 23.11 + global: + rte_eth_bond_8023ad_member_info; + rte_eth_bond_active_members_get; + rte_eth_bond_member_add; + rte_eth_bond_member_remove; + rte_eth_bond_members_get; +}; diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c index 4c4acc7cf06..4a4e97287c8 100644 --- a/drivers/net/cnxk/cn10k_ethdev.c +++ b/drivers/net/cnxk/cn10k_ethdev.c @@ -262,7 +262,7 @@ cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, txq->cpt_desc = inl_lf->nb_desc * 0.7; txq->sa_base = (uint64_t)dev->outb.sa_base; - txq->sa_base |= eth_dev->data->port_id; + txq->sa_base |= (uint64_t)eth_dev->data->port_id; PLT_STATIC_ASSERT(ROC_NIX_INL_SA_BASE_ALIGN == BIT_ULL(16)); } @@ -355,11 +355,13 @@ cn10k_nix_rx_queue_meta_aura_update(struct rte_eth_dev *eth_dev) rq = &dev->rqs[i]; rxq = eth_dev->data->rx_queues[i]; rxq->meta_aura = rq->meta_aura_handle; + rxq->meta_pool = dev->nix.meta_mempool; /* Assume meta packet from normal aura if meta aura 
is not setup */ if (!rxq->meta_aura) { rxq_sp = cnxk_eth_rxq_to_sp(rxq); rxq->meta_aura = rxq_sp->qconf.mp->pool_id; + rxq->meta_pool = (uintptr_t)rxq_sp->qconf.mp; } } /* Store mempool in lookup mem */ @@ -639,14 +641,17 @@ cn10k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev, if (!conf->flags) { /* Clear offload flags on disable */ - dev->rx_offload_flags &= ~NIX_RX_REAS_F; + if (!dev->inb.nb_oop) + dev->rx_offload_flags &= ~NIX_RX_REAS_F; + dev->inb.reass_en = false; return 0; } - rc = roc_nix_reassembly_configure(conf->timeout_ms, - conf->max_frags); - if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) + rc = roc_nix_reassembly_configure(conf->timeout_ms, conf->max_frags); + if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) { dev->rx_offload_flags |= NIX_RX_REAS_F; + dev->inb.reass_en = true; + } return rc; } diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c index b98fc9378eb..fbd719913e7 100644 --- a/drivers/net/cnxk/cn10k_ethdev_sec.c +++ b/drivers/net/cnxk/cn10k_ethdev_sec.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -303,7 +304,7 @@ static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = { RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() }; -static const struct rte_security_capability cn10k_eth_sec_capabilities[] = { +static const struct rte_security_capability cn10k_eth_sec_ipsec_capabilities[] = { { /* IPsec Inline Protocol ESP Tunnel Ingress */ .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, .protocol = RTE_SECURITY_PROTOCOL_IPSEC, @@ -324,6 +325,7 @@ static const struct rte_security_capability cn10k_eth_sec_capabilities[] = { .l4_csum_enable = 1, .stats = 1, .esn = 1, + .ingress_oop = 1, }, }, .crypto_capabilities = cn10k_eth_sec_crypto_caps, @@ -373,6 +375,7 @@ static const struct rte_security_capability cn10k_eth_sec_capabilities[] = { .l4_csum_enable = 1, .stats = 1, .esn = 1, + .ingress_oop = 1, }, }, .crypto_capabilities = cn10k_eth_sec_crypto_caps, @@ -396,16 +399,106 @@ static const struct rte_security_capability cn10k_eth_sec_capabilities[] = { .l4_csum_enable = 1, .stats = 1, .esn = 1, + .ingress_oop = 1, }, }, .crypto_capabilities = cn10k_eth_sec_crypto_caps, .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA }, - { - .action = RTE_SECURITY_ACTION_TYPE_NONE - } }; +static const struct rte_security_capability cn10k_eth_sec_macsec_capabilities[] = { + { /* MACsec Inline Protocol, AES-GCM-128 algo */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_MACSEC, + .macsec = { + .mtu = ROC_MCS_MAX_MTU, + .alg = RTE_SECURITY_MACSEC_ALG_GCM_128, + .max_nb_sc = 128, + .max_nb_sa = 256, + .max_nb_sess = 256, + .replay_win_sz = ROC_MCS_MAX_AR_WINSZ, + .relative_sectag_insert = 1, + .fixed_sectag_insert = 1, + .icv_include_da_sa = 1, + .ctrl_port_enable = 1, + .preserve_sectag = 1, + .preserve_icv = 1, + .validate_frames = 1, + .re_key = 1, + .anti_replay = 1, + }, + }, + { /* MACsec Inline Protocol, AES-GCM-256 algo */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_MACSEC, + .macsec = { + .mtu = ROC_MCS_MAX_MTU, + .alg = RTE_SECURITY_MACSEC_ALG_GCM_256, + .max_nb_sc = 128, + .max_nb_sa = 256, + .max_nb_sess = 256, + .replay_win_sz = ROC_MCS_MAX_AR_WINSZ, + .relative_sectag_insert = 1, + .fixed_sectag_insert = 1, + .icv_include_da_sa = 1, + .ctrl_port_enable = 1, + .preserve_sectag = 1, + .preserve_icv = 1, + .validate_frames = 1, + .re_key = 1, + .anti_replay = 1, + }, + }, + { /* MACsec Inline Protocol, 
AES-GCM-XPN-128 algo */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_MACSEC, + .macsec = { + .mtu = ROC_MCS_MAX_MTU, + .alg = RTE_SECURITY_MACSEC_ALG_GCM_XPN_128, + .max_nb_sc = 128, + .max_nb_sa = 256, + .max_nb_sess = 256, + .replay_win_sz = ROC_MCS_MAX_AR_WINSZ, + .relative_sectag_insert = 1, + .fixed_sectag_insert = 1, + .icv_include_da_sa = 1, + .ctrl_port_enable = 1, + .preserve_sectag = 1, + .preserve_icv = 1, + .validate_frames = 1, + .re_key = 1, + .anti_replay = 1, + }, + }, + { /* MACsec Inline Protocol, AES-GCM-XPN-256 algo */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_MACSEC, + .macsec = { + .mtu = ROC_MCS_MAX_MTU, + .alg = RTE_SECURITY_MACSEC_ALG_GCM_XPN_256, + .max_nb_sc = 128, + .max_nb_sa = 256, + .max_nb_sess = 256, + .replay_win_sz = ROC_MCS_MAX_AR_WINSZ, + .relative_sectag_insert = 1, + .fixed_sectag_insert = 1, + .icv_include_da_sa = 1, + .ctrl_port_enable = 1, + .preserve_sectag = 1, + .preserve_icv = 1, + .validate_frames = 1, + .re_key = 1, + .anti_replay = 1, + }, + }, +}; + +#define SEC_CAPS_LEN (RTE_DIM(cn10k_eth_sec_ipsec_capabilities) + \ + RTE_DIM(cn10k_eth_sec_macsec_capabilities) + 1) + +static struct rte_security_capability cn10k_eth_sec_capabilities[SEC_CAPS_LEN]; + static inline void cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf) { @@ -522,7 +615,7 @@ cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event) plt_warn("Outbound error, bad ip pkt, mbuf %p," " sa_index %u (total warnings %" PRIu64 ")", mbuf, sess_priv.sa_idx, warn_cnt); - desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN; + desc.subtype = -res->uc_compcode; break; default: warn_cnt++; @@ -532,7 +625,7 @@ cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event) " (total warnings %" PRIu64 ")", mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode, warn_cnt); - desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN; + desc.subtype = -res->uc_compcode; break; } @@ -657,6 +750,20 @@ cn10k_eth_sec_session_create(void *device, return -rte_errno; } + if (conf->ipsec.options.ingress_oop && + rte_security_oop_dynfield_offset < 0) { + /* Register for security OOP dynfield if required */ + if (rte_security_oop_dynfield_register() < 0) + return -rte_errno; + } + + /* We cannot support inbound reassembly and OOP together */ + if (conf->ipsec.options.ip_reassembly_en && + conf->ipsec.options.ingress_oop) { + plt_err("Cannot support Inbound reassembly and OOP together"); + return -ENOTSUP; + } + ipsec = &conf->ipsec; crypto = conf->crypto_xform; inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS); @@ -743,6 +850,12 @@ cn10k_eth_sec_session_create(void *device, inb_sa_dptr->w0.s.count_mib_bytes = 1; inb_sa_dptr->w0.s.count_mib_pkts = 1; } + + /* Enable out-of-place processing */ + if (ipsec->options.ingress_oop) + inb_sa_dptr->w0.s.pkt_format = + ROC_IE_OT_SA_PKT_FMT_FULL; + /* Prepare session priv */ sess_priv.inb_sa = 1; sess_priv.sa_idx = ipsec->spi & spi_mask; @@ -754,6 +867,7 @@ cn10k_eth_sec_session_create(void *device, eth_sec->spi = ipsec->spi; eth_sec->inl_dev = !!dev->inb.inl_dev; eth_sec->inb = true; + eth_sec->inb_oop = !!ipsec->options.ingress_oop; TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry); dev->inb.nb_sess++; @@ -769,6 +883,15 @@ cn10k_eth_sec_session_create(void *device, inb_priv->reass_dynflag_bit = dev->reass_dynflag_bit; } + if (ipsec->options.ingress_oop) + dev->inb.nb_oop++; + + /* Update function pointer to handle OOP sessions */ + if 
(dev->inb.nb_oop && + !(dev->rx_offload_flags & NIX_RX_REAS_F)) { + dev->rx_offload_flags |= NIX_RX_REAS_F; + cn10k_eth_set_rx_function(eth_dev); + } } else { struct roc_ot_ipsec_outb_sa *outb_sa, *outb_sa_dptr; struct cn10k_outb_priv_data *outb_priv; @@ -918,6 +1041,15 @@ cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess) sizeof(struct roc_ot_ipsec_inb_sa)); TAILQ_REMOVE(&dev->inb.list, eth_sec, entry); dev->inb.nb_sess--; + if (eth_sec->inb_oop) + dev->inb.nb_oop--; + + /* Clear offload flags if was used by OOP */ + if (!dev->inb.nb_oop && !dev->inb.reass_en && + dev->rx_offload_flags & NIX_RX_REAS_F) { + dev->rx_offload_flags &= ~NIX_RX_REAS_F; + cn10k_eth_set_rx_function(eth_dev); + } } else { /* Disable SA */ sa_dptr = dev->outb.sa_dptr; @@ -1093,15 +1225,38 @@ cn10k_eth_sec_session_stats_get(void *device, struct rte_security_session *sess, return 0; } +static void +eth_sec_caps_add(struct rte_security_capability eth_sec_caps[], uint32_t *idx, + const struct rte_security_capability *caps, uint32_t nb_caps) +{ + PLT_VERIFY(*idx + nb_caps < SEC_CAPS_LEN); + + rte_memcpy(ð_sec_caps[*idx], caps, nb_caps * sizeof(caps[0])); + *idx += nb_caps; +} + void cn10k_eth_sec_ops_override(void) { static int init_once; + uint32_t idx = 0; if (init_once) return; init_once = 1; + if (roc_feature_nix_has_inl_ipsec()) + eth_sec_caps_add(cn10k_eth_sec_capabilities, &idx, + cn10k_eth_sec_ipsec_capabilities, + RTE_DIM(cn10k_eth_sec_ipsec_capabilities)); + + if (roc_feature_nix_has_macsec()) + eth_sec_caps_add(cn10k_eth_sec_capabilities, &idx, + cn10k_eth_sec_macsec_capabilities, + RTE_DIM(cn10k_eth_sec_macsec_capabilities)); + + cn10k_eth_sec_capabilities[idx].action = RTE_SECURITY_ACTION_TYPE_NONE; + /* Update platform specific ops */ cnxk_eth_sec_ops.macsec_sa_create = cnxk_eth_macsec_sa_create; cnxk_eth_sec_ops.macsec_sc_create = cnxk_eth_macsec_sc_create; diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h index 8148866e44d..f5e935d3838 100644 --- a/drivers/net/cnxk/cn10k_rx.h +++ b/drivers/net/cnxk/cn10k_rx.h @@ -164,9 +164,9 @@ nix_sec_reass_frags_get(const struct cpt_parse_hdr_s *hdr, struct rte_mbuf **nex next_mbufs[1] = ((struct rte_mbuf *)vgetq_lane_u64(frags23, 0) - 1); next_mbufs[2] = ((struct rte_mbuf *)vgetq_lane_u64(frags23, 1) - 1); - fsz_w1 = vdup_n_u64(finfo->w1.u64); + fsz_w1 = vreinterpret_u16_u64(vdup_n_u64(finfo->w1.u64)); fsz_w1 = vrev16_u8(fsz_w1); - return vget_lane_u64(fsz_w1, 0); + return vget_lane_u64(vreinterpret_u64_u16(fsz_w1), 0); } static __rte_always_inline void @@ -174,7 +174,7 @@ nix_sec_reass_first_frag_update(struct rte_mbuf *head, const uint8_t *m_ipptr, uint64_t fsz, uint64_t cq_w1, uint16_t *ihl) { union nix_rx_parse_u *rx = (union nix_rx_parse_u *)((uintptr_t)(head + 1) + 8); - uint16_t fragx_sum = vaddv_u16(vdup_n_u64(fsz)); + uint16_t fragx_sum = vaddv_u16(vreinterpret_u16_u64(vdup_n_u64(fsz))); uint8_t lcptr = rx->lcptr; uint16_t tot_len; uint32_t cksum; @@ -205,26 +205,36 @@ nix_sec_reass_first_frag_update(struct rte_mbuf *head, const uint8_t *m_ipptr, struct rte_ipv6_hdr *hdr = (struct rte_ipv6_hdr *)ipptr; size_t ext_len = sizeof(struct rte_ipv6_hdr); uint8_t *nxt_hdr = (uint8_t *)hdr; + uint8_t *nxt_proto = &hdr->proto; int nh = hdr->proto; *ihl = 0; + tot_len = 0; while (nh != -EINVAL) { nxt_hdr += ext_len; *ihl += ext_len; + if (nh == IPPROTO_FRAGMENT) { + *nxt_proto = *nxt_hdr; + tot_len = *ihl; + } nh = rte_ipv6_get_next_ext(nxt_hdr, nh, &ext_len); + nxt_proto = nxt_hdr; } /* Remove the frag header by moving 
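For context on the session-create changes above: the driver now registers the security OOP dynamic field on first use, rejects combining inbound reassembly with out-of-place processing, programs the SA in full-packet format when ingress_oop is requested, and switches the Rx function while OOP sessions exist. A hedged, application-side sketch of requesting such a session follows; the SPI value, mempool and AEAD transform are placeholders, and only options.ingress_oop is the knob exercised by the driver code above.

#include <rte_security.h>

/* Sketch: create an inline-protocol inbound SA with out-of-place processing.
 * sec_ctx, sess_mp and aead_xform are assumed to be prepared elsewhere.
 */
static void *
create_oop_sa(void *sec_ctx, struct rte_mempool *sess_mp,
	      struct rte_crypto_sym_xform *aead_xform)
{
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.spi = 0x1000,			/* placeholder */
			.options = {
				.ingress_oop = 1,	/* request out-of-place processing */
				.ip_reassembly_en = 0,	/* rejected together with OOP above */
			},
		},
		.crypto_xform = aead_xform,
	};

	return rte_security_session_create(sec_ctx, &conf, sess_mp);
}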
header 8 bytes forward */ hdr->payload_len = rte_cpu_to_be_16(fragx_sum + *ihl - 8 - sizeof(struct rte_ipv6_hdr)); + /* tot_len is sum of all IP header's length before fragment header */ rte_memcpy(rte_pktmbuf_mtod_offset(head, void *, 8), rte_pktmbuf_mtod(head, void *), - lcptr + sizeof(struct rte_ipv6_hdr)); + lcptr + tot_len); head->data_len -= 8; head->data_off += 8; head->pkt_len = lcptr + *ihl - 8 + fragx_sum; + /* ihl l3hdr size value should be up to fragment header for next frags */ + *ihl = tot_len + 8; } } @@ -402,6 +412,41 @@ nix_sec_reassemble_frags(const struct cpt_parse_hdr_s *hdr, struct rte_mbuf *hea return head; } +static inline struct rte_mbuf * +nix_sec_oop_process(const struct cpt_parse_hdr_s *hdr, struct rte_mbuf *mbuf, uint64_t *mbuf_init) +{ + uintptr_t wqe = rte_be_to_cpu_64(hdr->wqe_ptr); + union nix_rx_parse_u *inner_rx; + struct rte_mbuf *inner; + uint16_t data_off; + + inner = ((struct rte_mbuf *)wqe) - 1; + + inner_rx = (union nix_rx_parse_u *)(wqe + 8); + inner->pkt_len = inner_rx->pkt_lenm1 + 1; + inner->data_len = inner_rx->pkt_lenm1 + 1; + + /* Mark inner mbuf as get */ + RTE_MEMPOOL_CHECK_COOKIES(inner->pool, + (void **)&inner, 1, 1); + /* Update rearm data for full mbuf as it has + * cpt parse header that needs to be skipped. + * + * Since meta pool will not have private area while + * ethdev RQ's first skip would be considering private area + * calculate actual data off and update in meta mbuf. + */ + data_off = (uintptr_t)hdr - (uintptr_t)mbuf->buf_addr; + data_off += sizeof(struct cpt_parse_hdr_s); + data_off += hdr->w0.pad_len; + *mbuf_init &= ~0xFFFFUL; + *mbuf_init |= (uint64_t)data_off; + + *rte_security_oop_dynfield(mbuf) = inner; + /* Return outer instead of inner mbuf as inner mbuf would have original encrypted packet */ + return mbuf; +} + static __rte_always_inline struct rte_mbuf * nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base, uintptr_t laddr, uint8_t *loff, struct rte_mbuf *mbuf, @@ -422,14 +467,18 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base, if (!(cq_w1 & BIT(11))) return mbuf; - inner = (struct rte_mbuf *)(rte_be_to_cpu_64(hdr->wqe_ptr) - - sizeof(struct rte_mbuf)); + if (flags & NIX_RX_REAS_F && hdr->w0.pkt_fmt == ROC_IE_OT_SA_PKT_FMT_FULL) { + inner = nix_sec_oop_process(hdr, mbuf, &mbuf_init); + } else { + inner = (struct rte_mbuf *)(rte_be_to_cpu_64(hdr->wqe_ptr) - + sizeof(struct rte_mbuf)); - /* Store meta in lmtline to free - * Assume all meta's from same aura. - */ - *(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf; - *loff = *loff + 1; + /* Store meta in lmtline to free + * Assume all meta's from same aura. 
+ */ + *(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf; + *loff = *loff + 1; + } /* Get SPI from CPT_PARSE_S's cookie(already swapped) */ w0 = hdr->w0.u64; @@ -471,11 +520,13 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base, & 0xFF) << 1 : RTE_MBUF_F_RX_IP_CKSUM_GOOD; } - /* Mark meta mbuf as put */ - RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0); + if (!(flags & NIX_RX_REAS_F) || hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL) { + /* Mark meta mbuf as put */ + RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0); - /* Mark inner mbuf as get */ - RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1); + /* Mark inner mbuf as get */ + RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1); + } /* Skip reassembly processing when multi-seg is enabled */ if (!(flags & NIX_RX_MULTI_SEG_F) && (flags & NIX_RX_REAS_F) && hdr->w0.num_frags) { @@ -510,6 +561,7 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa, (const struct cpt_parse_hdr_s *)cpth; uint64_t mbuf_init = vgetq_lane_u64(*rearm, 0); struct cn10k_inb_priv_data *inb_priv; + uintptr_t p; /* Clear checksum flags */ *ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK | @@ -522,7 +574,9 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa, *rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata; /* Mark inner mbuf as get */ - RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1); + if (!(flags & NIX_RX_REAS_F) || + hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL) + RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1); if (!(flags & NIX_RX_MULTI_SEG_F) && flags & NIX_RX_REAS_F && hdr->w0.num_frags) { if ((!(hdr->w0.err_sum) || roc_ie_ot_ucc_is_success(hdr->w3.uc_ccode)) && @@ -530,7 +584,8 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa, /* First frag len */ inner->pkt_len = vgetq_lane_u16(*rx_desc_field1, 2); inner->data_len = vgetq_lane_u16(*rx_desc_field1, 4); - *(uint64_t *)(&inner->rearm_data) = mbuf_init; + p = (uintptr_t)&inner->rearm_data; + *(uint64_t *)p = mbuf_init; /* Reassembly success */ nix_sec_reassemble_frags(hdr, inner, cq_w1, cq_w5, mbuf_init); @@ -545,13 +600,26 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa, *rx_desc_field1, 4); /* Data offset might be updated */ - mbuf_init = *(uint64_t *)(&inner->rearm_data); + mbuf_init = *(uint64_t *)p; *rearm = vsetq_lane_u64(mbuf_init, *rearm, 0); } else { /* Reassembly failure */ nix_sec_attach_frags(hdr, inner, inb_priv, mbuf_init); *ol_flags |= inner->ol_flags; } + } else if (flags & NIX_RX_REAS_F) { + /* Without fragmentation but may have to handle OOP session */ + if (hdr->w0.pkt_fmt == ROC_IE_OT_SA_PKT_FMT_FULL) { + uint64_t mbuf_init = 0; + + /* Caller has already prepared to return second pass + * mbuf and inner mbuf is actually outer. + * Store original buffer pointer in dynfield. 
+ */ + nix_sec_oop_process(hdr, inner, &mbuf_init); + /* Clear and update lower 16 bit of data offset */ + *rearm = (*rearm & ~(BIT_ULL(16) - 1)) | mbuf_init; + } } } #endif @@ -628,6 +696,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf, uint64_t cq_w1; int64_t len; uint64_t sg; + uintptr_t p; cq_w1 = *(const uint64_t *)rx; if (flags & NIX_RX_REAS_F) @@ -635,7 +704,9 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf, /* Use inner rx parse for meta pkts sg list */ if (cq_w1 & BIT(11) && flags & NIX_RX_OFFLOAD_SECURITY_F) { const uint64_t *wqe = (const uint64_t *)(mbuf + 1); - rx = (const union nix_rx_parse_u *)(wqe + 1); + + if (hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL) + rx = (const union nix_rx_parse_u *)(wqe + 1); } sg = *(const uint64_t *)(rx + 1); @@ -703,7 +774,8 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf, mbuf->data_len = sg_len; sg = sg >> 16; - *(uint64_t *)(&mbuf->rearm_data) = rearm & ~0xFFFF; + p = (uintptr_t)&mbuf->rearm_data; + *(uint64_t *)p = rearm & ~0xFFFF; nb_segs--; iova_list++; @@ -753,7 +825,8 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf, head->nb_segs = nb_segs; } mbuf = next_frag; - *(uint64_t *)(&mbuf->rearm_data) = rearm + ldptr; + p = (uintptr_t)&mbuf->rearm_data; + *(uint64_t *)p = rearm + ldptr; mbuf->data_len = (sg & 0xFFFF) - ldptr - (flags & NIX_RX_OFFLOAD_TSTAMP_F ? CNXK_NIX_TIMESYNC_RX_OFFSET : 0); @@ -761,6 +834,31 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf, num_frags--; frag_i++; goto again; + } else if ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11)) && !reas_success && + hdr->w0.pkt_fmt == ROC_IE_OT_SA_PKT_FMT_FULL) { + uintptr_t wqe = rte_be_to_cpu_64(hdr->wqe_ptr); + + /* Process OOP packet inner buffer mseg. reas_success flag is used here only + * to avoid looping. + */ + mbuf = ((struct rte_mbuf *)wqe) - 1; + rx = (const union nix_rx_parse_u *)(wqe + 8); + eol = ((const rte_iova_t *)(rx + 1) + ((rx->desc_sizem1 + 1) << 1)); + sg = *(const uint64_t *)(rx + 1); + nb_segs = (sg >> 48) & 0x3; + + + len = mbuf->pkt_len; + p = (uintptr_t)&mbuf->rearm_data; + *(uint64_t *)p = rearm; + mbuf->data_len = (sg & 0xFFFF) - + (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 
+ CNXK_NIX_TIMESYNC_RX_OFFSET : 0); + head = mbuf; + head->nb_segs = nb_segs; + /* Using this flag to avoid looping in case of OOP */ + reas_success = true; + goto again; } /* Update for last failure fragment */ @@ -781,6 +879,7 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag, const uint64_t w1 = *(const uint64_t *)rx; uint16_t len = rx->pkt_lenm1 + 1; uint64_t ol_flags = 0; + uintptr_t p; if (flag & NIX_RX_OFFLOAD_PTYPE_F) mbuf->packet_type = nix_ptype_get(lookup_mem, w1); @@ -795,7 +894,7 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag, /* Skip rx ol flags extraction for Security packets */ if ((!(flag & NIX_RX_SEC_REASSEMBLY_F) || !(w1 & BIT(11))) && flag & NIX_RX_OFFLOAD_CHECKSUM_F) - ol_flags |= nix_rx_olflags_get(lookup_mem, w1); + ol_flags |= (uint64_t)nix_rx_olflags_get(lookup_mem, w1); if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) { if (rx->vtag0_gone) { @@ -818,7 +917,8 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag, mbuf->ol_flags = ol_flags; mbuf->pkt_len = len; mbuf->data_len = len; - *(uint64_t *)(&mbuf->rearm_data) = val; + p = (uintptr_t)&mbuf->rearm_data; + *(uint64_t *)p = val; } if (flag & NIX_RX_MULTI_SEG_F) @@ -899,6 +999,7 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts, const uint64_t mbuf_init = rxq->mbuf_initializer; const void *lookup_mem = rxq->lookup_mem; const uint64_t data_off = rxq->data_off; + struct rte_mempool *meta_pool = NULL; const uintptr_t desc = rxq->desc; const uint64_t wdata = rxq->wdata; const uint32_t qmask = rxq->qmask; @@ -909,8 +1010,8 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts, struct nix_cqe_hdr_s *cq; struct rte_mbuf *mbuf; uint64_t aura_handle; + uint64_t sa_base = 0; uintptr_t cpth = 0; - uint64_t sa_base; uint16_t lmt_id; uint64_t laddr; @@ -923,6 +1024,8 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts, ROC_LMT_BASE_ID_GET(lbase, lmt_id); laddr = lbase; laddr += 8; + if (flags & NIX_RX_REAS_F) + meta_pool = (struct rte_mempool *)rxq->meta_pool; } while (packets < nb_pkts) { @@ -943,6 +1046,11 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts, cpth = ((uintptr_t)mbuf + (uint16_t)data_off); + /* Update mempool pointer for full mode pkt */ + if ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11)) && + !((*(uint64_t *)cpth) & BIT(15))) + mbuf->pool = meta_pool; + mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, cq_w5, sa_base, laddr, &loff, mbuf, data_off, flags, mbuf_init); @@ -1047,12 +1155,13 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer); struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3; uint8_t loff = 0, lnum = 0, shft = 0; + struct rte_mempool *meta_pool = NULL; uint8x16_t f0, f1, f2, f3; uint16_t lmt_id, d_off; uint64_t lbase, laddr; + uintptr_t sa_base = 0; uint16_t packets = 0; uint16_t pkts_left; - uintptr_t sa_base; uint32_t head; uintptr_t cq0; @@ -1099,6 +1208,9 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, /* Get SA Base from lookup tbl using port_id */ port = mbuf_initializer >> 48; sa_base = cnxk_nix_sa_base_get(port, lookup_mem); + if (flags & NIX_RX_REAS_F) + meta_pool = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port, + lookup_mem); lbase = lmt_base; } else { @@ -1106,6 +1218,8 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, d_off = rxq->data_off; sa_base = rxq->sa_base; lbase = 
rxq->lmt_base; + if (flags & NIX_RX_REAS_F) + meta_pool = (struct rte_mempool *)rxq->meta_pool; } sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1); ROC_LMT_BASE_ID_GET(lbase, lmt_id); @@ -1334,10 +1448,10 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, } if (flags & NIX_RX_OFFLOAD_CHECKSUM_F) { - ol_flags0 |= nix_rx_olflags_get(lookup_mem, cq0_w1); - ol_flags1 |= nix_rx_olflags_get(lookup_mem, cq1_w1); - ol_flags2 |= nix_rx_olflags_get(lookup_mem, cq2_w1); - ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1); + ol_flags0 |= (uint64_t)nix_rx_olflags_get(lookup_mem, cq0_w1); + ol_flags1 |= (uint64_t)nix_rx_olflags_get(lookup_mem, cq1_w1); + ol_flags2 |= (uint64_t)nix_rx_olflags_get(lookup_mem, cq2_w1); + ol_flags3 |= (uint64_t)nix_rx_olflags_get(lookup_mem, cq3_w1); } /* Translate meta to mbuf */ @@ -1510,10 +1624,19 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, uint16_t len = vget_lane_u16(lens, 0); cpth0 = (uintptr_t)mbuf0 + d_off; + /* Free meta to aura */ - NIX_PUSH_META_TO_FREE(mbuf0, laddr, &loff); - mbuf01 = vsetq_lane_u64(wqe, mbuf01, 0); - mbuf0 = (struct rte_mbuf *)wqe; + if (!(flags & NIX_RX_REAS_F) || + *(uint64_t *)cpth0 & BIT_ULL(15)) { + /* Free meta to aura */ + NIX_PUSH_META_TO_FREE(mbuf0, laddr, + &loff); + mbuf01 = vsetq_lane_u64(wqe, mbuf01, 0); + mbuf0 = (struct rte_mbuf *)wqe; + } else if (flags & NIX_RX_REAS_F) { + /* Update meta pool for full mode pkts */ + mbuf0->pool = meta_pool; + } /* Update pkt_len and data_len */ f0 = vsetq_lane_u16(len, f0, 2); @@ -1535,10 +1658,18 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, uint16_t len = vget_lane_u16(lens, 1); cpth1 = (uintptr_t)mbuf1 + d_off; + /* Free meta to aura */ - NIX_PUSH_META_TO_FREE(mbuf1, laddr, &loff); - mbuf01 = vsetq_lane_u64(wqe, mbuf01, 1); - mbuf1 = (struct rte_mbuf *)wqe; + if (!(flags & NIX_RX_REAS_F) || + *(uint64_t *)cpth1 & BIT_ULL(15)) { + NIX_PUSH_META_TO_FREE(mbuf1, laddr, + &loff); + mbuf01 = vsetq_lane_u64(wqe, mbuf01, 1); + mbuf1 = (struct rte_mbuf *)wqe; + } else if (flags & NIX_RX_REAS_F) { + /* Update meta pool for full mode pkts */ + mbuf1->pool = meta_pool; + } /* Update pkt_len and data_len */ f1 = vsetq_lane_u16(len, f1, 2); @@ -1559,10 +1690,18 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, uint16_t len = vget_lane_u16(lens, 2); cpth2 = (uintptr_t)mbuf2 + d_off; + /* Free meta to aura */ - NIX_PUSH_META_TO_FREE(mbuf2, laddr, &loff); - mbuf23 = vsetq_lane_u64(wqe, mbuf23, 0); - mbuf2 = (struct rte_mbuf *)wqe; + if (!(flags & NIX_RX_REAS_F) || + *(uint64_t *)cpth2 & BIT_ULL(15)) { + NIX_PUSH_META_TO_FREE(mbuf2, laddr, + &loff); + mbuf23 = vsetq_lane_u64(wqe, mbuf23, 0); + mbuf2 = (struct rte_mbuf *)wqe; + } else if (flags & NIX_RX_REAS_F) { + /* Update meta pool for full mode pkts */ + mbuf2->pool = meta_pool; + } /* Update pkt_len and data_len */ f2 = vsetq_lane_u16(len, f2, 2); @@ -1583,10 +1722,18 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, uint16_t len = vget_lane_u16(lens, 3); cpth3 = (uintptr_t)mbuf3 + d_off; + /* Free meta to aura */ - NIX_PUSH_META_TO_FREE(mbuf3, laddr, &loff); - mbuf23 = vsetq_lane_u64(wqe, mbuf23, 1); - mbuf3 = (struct rte_mbuf *)wqe; + if (!(flags & NIX_RX_REAS_F) || + *(uint64_t *)cpth3 & BIT_ULL(15)) { + NIX_PUSH_META_TO_FREE(mbuf3, laddr, + &loff); + mbuf23 = vsetq_lane_u64(wqe, mbuf23, 1); + mbuf3 = (struct rte_mbuf *)wqe; + } else if (flags & NIX_RX_REAS_F) { + /* Update meta pool for 
full mode pkts */ + mbuf3->pool = meta_pool; + } /* Update pkt_len and data_len */ f3 = vsetq_lane_u16(len, f3, 2); @@ -1715,7 +1862,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, * timestamp. */ tstamp->rx_ready = 1; - tstamp->rx_tstamp = ts[31 - __builtin_clz(res)]; + tstamp->rx_tstamp = ts[31 - rte_clz32(res)]; } } diff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h index b4287e2864a..aeffc4ac924 100644 --- a/drivers/net/cnxk/cn10k_rxtx.h +++ b/drivers/net/cnxk/cn10k_rxtx.h @@ -78,6 +78,7 @@ struct cn10k_eth_rxq { uint64_t sa_base; uint64_t lmt_base; uint64_t meta_aura; + uintptr_t meta_pool; uint16_t rq; struct cnxk_timesync_info *tstamp; } __plt_cache_aligned; diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h index 298d243aaca..e7943e3c889 100644 --- a/drivers/net/cnxk/cn10k_tx.h +++ b/drivers/net/cnxk/cn10k_tx.h @@ -484,7 +484,7 @@ cn10k_nix_sec_steorl(uintptr_t io_addr, uint32_t lmt_id, uint8_t lnum, data &= ~(0x7ULL << 16); /* Update lines - 1 that contain valid data */ data |= ((uint64_t)(lnum + loff - 1)) << 12; - data |= lmt_id; + data |= (uint64_t)lmt_id; /* STEOR */ roc_lmt_submit_steorl(data, pa); @@ -577,7 +577,7 @@ cn10k_nix_prep_sec_vec(struct rte_mbuf *m, uint64x2_t *cmd0, uint64x2_t *cmd1, nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1); nixtx += 16; - w0 |= cn10k_nix_tx_ext_subs(flags) + 1; + w0 |= cn10k_nix_tx_ext_subs(flags) + 1ULL; dptr += l2_len; ucode_cmd[1] = dptr; *cmd1 = vsetq_lane_u16(pkt_len + dlen_adj, *cmd1, 0); @@ -718,7 +718,7 @@ cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr, nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1); nixtx += 16; - w0 |= cn10k_nix_tx_ext_subs(flags) + 1; + w0 |= cn10k_nix_tx_ext_subs(flags) + 1ULL; dptr += l2_len; ucode_cmd[1] = dptr; sg->seg1_size = pkt_len + dlen_adj; @@ -1011,7 +1011,8 @@ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq, send_hdr_ext->w0.markptr = markptr; } - if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) { + if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_TSO_F && + (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) { uint16_t lso_sb; uint64_t mask; @@ -1374,8 +1375,8 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts, lnum++; } - if ((flags & NIX_TX_VWQE_F) && !(ws[1] & BIT_ULL(35))) - ws[1] = roc_sso_hws_head_wait(ws[0]); + if ((flags & NIX_TX_VWQE_F) && !(ws[3] & BIT_ULL(35))) + ws[3] = roc_sso_hws_head_wait(ws[0]); left -= burst; tx_pkts += burst; @@ -1421,7 +1422,7 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts, pa = io_addr | (data & 0x7) << 4; data &= ~0x7ULL; data |= ((uint64_t)(burst - 1)) << 12; - data |= lmt_id; + data |= (uint64_t)lmt_id; if (flags & NIX_TX_VWQE_F) cn10k_nix_vwqe_wait_fc(txq, burst); @@ -1531,8 +1532,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws, } } - if ((flags & NIX_TX_VWQE_F) && !(ws[1] & BIT_ULL(35))) - ws[1] = roc_sso_hws_head_wait(ws[0]); + if ((flags & NIX_TX_VWQE_F) && !(ws[3] & BIT_ULL(35))) + ws[3] = roc_sso_hws_head_wait(ws[0]); left -= burst; tx_pkts += burst; @@ -1583,7 +1584,7 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws, data0 &= ~0x7ULL; /* Move lmtst1..15 sz to bits 63:19 */ data0 <<= 16; - data0 |= ((burst - 1) << 12); + data0 |= ((burst - 1ULL) << 12); data0 |= (uint64_t)lmt_id; if (flags & NIX_TX_VWQE_F) @@ -1999,6 +2000,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws, uint64x2_t xmask01_w0, xmask23_w0; uint64x2_t xmask01_w1, xmask23_w1; rte_iova_t 
io_addr = txq->io_addr; + uint8_t lnum, shift = 0, loff = 0; uintptr_t laddr = txq->lmt_base; uint8_t c_lnum, c_shft, c_loff; struct nix_send_hdr_s send_hdr; @@ -2006,7 +2008,6 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws, uint64x2_t xtmp128, ytmp128; uint64x2_t xmask01, xmask23; uintptr_t c_laddr = laddr; - uint8_t lnum, shift, loff = 0; rte_iova_t c_io_addr; uint64_t sa_base; union wdata { @@ -3122,8 +3123,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws, if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F)) wd.data[0] >>= 16; - if ((flags & NIX_TX_VWQE_F) && !(ws[1] & BIT_ULL(35))) - ws[1] = roc_sso_hws_head_wait(ws[0]); + if ((flags & NIX_TX_VWQE_F) && !(ws[3] & BIT_ULL(35))) + ws[3] = roc_sso_hws_head_wait(ws[0]); left -= burst; @@ -3193,7 +3194,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws, wd.data[0] <<= 16; wd.data[0] |= ((uint64_t)(lnum - 1)) << 12; - wd.data[0] |= lmt_id; + wd.data[0] |= (uint64_t)lmt_id; if (flags & NIX_TX_VWQE_F) cn10k_nix_vwqe_wait_fc(txq, burst); diff --git a/drivers/net/cnxk/cn9k_rx.h b/drivers/net/cnxk/cn9k_rx.h index 4d476d0a02f..d8bb65c643b 100644 --- a/drivers/net/cnxk/cn9k_rx.h +++ b/drivers/net/cnxk/cn9k_rx.h @@ -788,7 +788,7 @@ cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts, */ rxq->tstamp->rx_ready = 1; rxq->tstamp->rx_tstamp = - ts[31 - __builtin_clz(res)]; + ts[31 - rte_clz32(res)]; } } diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c index 01b707b6c4a..5e11bbb017e 100644 --- a/drivers/net/cnxk/cnxk_ethdev.c +++ b/drivers/net/cnxk/cnxk_ethdev.c @@ -1197,8 +1197,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE]; struct roc_nix_fc_cfg fc_cfg = {0}; struct roc_nix *nix = &dev->nix; + uint16_t nb_rxq, nb_txq, nb_cq; struct rte_ether_addr *ea; - uint16_t nb_rxq, nb_txq; uint64_t rx_cfg; void *qs; int rc; @@ -1301,6 +1301,15 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) goto fail_configure; } + if (!roc_nix_is_vf_or_sdp(nix)) { + /* Sync same MAC address to CGX/RPM table */ + rc = roc_nix_mac_addr_set(nix, dev->mac_addr); + if (rc) { + plt_err("Failed to set mac addr, rc=%d", rc); + goto fail_configure; + } + } + /* Check if ptp is enable in PF owning this VF*/ if (!roc_nix_is_pf(nix) && (!roc_nix_is_sdp(nix))) dev->ptp_en = roc_nix_ptp_is_enable(nix); @@ -1309,6 +1318,9 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) nb_rxq = data->nb_rx_queues; nb_txq = data->nb_tx_queues; + nb_cq = nb_rxq; + if (nix->tx_compl_ena) + nb_cq += nb_txq; rc = -ENOMEM; if (nb_rxq) { /* Allocate memory for roc rq's and cq's */ @@ -1318,13 +1330,6 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) goto free_nix_lf; } dev->rqs = qs; - - qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0); - if (!qs) { - plt_err("Failed to alloc cqs"); - goto free_nix_lf; - } - dev->cqs = qs; } if (nb_txq) { @@ -1335,15 +1340,15 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) goto free_nix_lf; } dev->sqs = qs; + } - if (nix->tx_compl_ena) { - qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_txq, 0); - if (!qs) { - plt_err("Failed to alloc cqs"); - goto free_nix_lf; - } - dev->cqs = qs; + if (nb_cq) { + qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_cq, 0); + if (!qs) { + plt_err("Failed to alloc cqs"); + goto free_nix_lf; } + dev->cqs = qs; } /* Re-enable NIX LF error interrupts */ @@ -1925,6 +1930,13 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev) goto dev_fini; } + dev->dmac_idx_map = rte_zmalloc("dmac_idx_map", max_entries * sizeof(int), 
0); + if (dev->dmac_idx_map == NULL) { + plt_err("Failed to allocate memory for dmac idx map"); + rc = -ENOMEM; + goto free_mac_addrs; + } + dev->max_mac_entries = max_entries; dev->dmac_filter_count = 1; @@ -1938,15 +1950,6 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev) /* Update the mac address */ memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN); - if (!roc_nix_is_vf_or_sdp(nix)) { - /* Sync same MAC address to CGX/RPM table */ - rc = roc_nix_mac_addr_set(nix, dev->mac_addr); - if (rc) { - plt_err("Failed to set mac addr, rc=%d", rc); - goto free_mac_addrs; - } - } - /* Union of all capabilities supported by CNXK. * Platform specific capabilities will be * updated later. @@ -1982,6 +1985,7 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev) free_mac_addrs: rte_free(eth_dev->data->mac_addrs); + rte_free(dev->dmac_idx_map); dev_fini: roc_nix_dev_fini(nix); error: @@ -2099,6 +2103,9 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset) if (rc) plt_err("Failed to free nix lf, rc=%d", rc); + rte_free(dev->dmac_idx_map); + dev->dmac_idx_map = NULL; + rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h index ed531fb2773..4d3ebf123b6 100644 --- a/drivers/net/cnxk/cnxk_ethdev.h +++ b/drivers/net/cnxk/cnxk_ethdev.h @@ -219,6 +219,9 @@ struct cnxk_eth_sec_sess { /* Inbound session on inl dev */ bool inl_dev; + + /* Out-Of-Place processing */ + bool inb_oop; }; TAILQ_HEAD(cnxk_eth_sec_sess_list, cnxk_eth_sec_sess); @@ -246,6 +249,12 @@ struct cnxk_eth_dev_sec_inb { /* DPTR for WRITE_SA microcode op */ void *sa_dptr; + /* Number of oop sessions */ + uint16_t nb_oop; + + /* Reassembly enabled */ + bool reass_en; + /* Lock to synchronize sa setup/release */ rte_spinlock_t lock; }; @@ -329,6 +338,7 @@ struct cnxk_eth_dev { uint8_t dmac_filter_count; uint8_t max_mac_entries; bool dmac_filter_enable; + int *dmac_idx_map; uint16_t flags; uint8_t ptype_disable; diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c index e1a0845ece6..8e862be9333 100644 --- a/drivers/net/cnxk/cnxk_ethdev_devargs.c +++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c @@ -245,6 +245,19 @@ parse_sdp_channel_mask(const char *key, const char *value, void *extra_args) return 0; } +static int +parse_val_u16(const char *key, const char *value, void *extra_args) +{ + RTE_SET_USED(key); + uint16_t val; + + val = atoi(value); + + *(uint16_t *)extra_args = val; + + return 0; +} + #define CNXK_RSS_RETA_SIZE "reta_size" #define CNXK_SCL_ENABLE "scalar_enable" #define CNXK_TX_COMPL_ENA "tx_compl_ena" @@ -265,10 +278,12 @@ parse_sdp_channel_mask(const char *key, const char *value, void *extra_args) #define CNXK_CUSTOM_SA_ACT "custom_sa_act" #define CNXK_SQB_SLACK "sqb_slack" #define CNXK_NIX_META_BUF_SZ "meta_buf_sz" +#define CNXK_FLOW_AGING_POLL_FREQ "aging_poll_freq" int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev) { + uint16_t aging_thread_poll_freq = ROC_NPC_AGE_POLL_FREQ_MIN; uint16_t reta_sz = ROC_NIX_RSS_RETA_SZ_64; uint16_t sqb_count = CNXK_NIX_TX_MAX_SQB; struct flow_pre_l2_size_info pre_l2_info; @@ -338,6 +353,8 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev) rte_kvargs_process(kvlist, CNXK_SQB_SLACK, &parse_sqb_count, &sqb_slack); rte_kvargs_process(kvlist, CNXK_NIX_META_BUF_SZ, &parse_meta_bufsize, &meta_buf_sz); + rte_kvargs_process(kvlist, CNXK_FLOW_AGING_POLL_FREQ, &parse_val_u16, + 
&aging_thread_poll_freq); rte_kvargs_free(kvlist); null_devargs: @@ -369,6 +386,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev) dev->npc.pre_l2_size_offset = pre_l2_info.pre_l2_size_off; dev->npc.pre_l2_size_offset_mask = pre_l2_info.pre_l2_size_off_mask; dev->npc.pre_l2_size_shift_dir = pre_l2_info.pre_l2_size_shift_dir; + dev->npc.flow_age.aging_poll_freq = aging_thread_poll_freq; return 0; exit: return -EINVAL; @@ -390,4 +408,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_cnxk, CNXK_NO_INL_DEV "=0" CNXK_SDP_CHANNEL_MASK "=<1-4095>/<1-4095>" CNXK_CUSTOM_SA_ACT "=1" - CNXK_SQB_SLACK "=<12-512>"); + CNXK_SQB_SLACK "=<12-512>" + CNXK_FLOW_AGING_POLL_FREQ "=<10-65535>"); diff --git a/drivers/net/cnxk/cnxk_ethdev_mcs.c b/drivers/net/cnxk/cnxk_ethdev_mcs.c index 52647743948..06ef7c98f3c 100644 --- a/drivers/net/cnxk/cnxk_ethdev_mcs.c +++ b/drivers/net/cnxk/cnxk_ethdev_mcs.c @@ -113,6 +113,8 @@ cnxk_eth_macsec_sa_create(void *device, struct rte_security_macsec_sa *conf) return -EINVAL; } + roc_mcs_sa_port_map_update(mcs_dev->mdev, sa_id, mcs_dev->port_id); + return sa_id; } @@ -586,9 +588,11 @@ cnxk_eth_macsec_session_stats_get(struct cnxk_eth_dev *dev, struct cnxk_macsec_s } static int -cnxk_mcs_event_cb(void *userdata, struct roc_mcs_event_desc *desc, void *cb_arg) +cnxk_mcs_event_cb(void *userdata, struct roc_mcs_event_desc *desc, void *cb_arg, + uint8_t port_id) { struct rte_eth_event_macsec_desc d = {0}; + struct cnxk_mcs_dev *mcs_dev = userdata; d.metadata = (uint64_t)userdata; @@ -617,15 +621,23 @@ cnxk_mcs_event_cb(void *userdata, struct roc_mcs_event_desc *desc, void *cb_arg) break; case ROC_MCS_EVENT_RX_SA_PN_HARD_EXP: d.type = RTE_ETH_EVENT_MACSEC_RX_SA_PN_HARD_EXP; + if (mcs_dev->port_id != port_id) + return 0; break; case ROC_MCS_EVENT_RX_SA_PN_SOFT_EXP: d.type = RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP; + if (mcs_dev->port_id != port_id) + return 0; break; case ROC_MCS_EVENT_TX_SA_PN_HARD_EXP: d.type = RTE_ETH_EVENT_MACSEC_TX_SA_PN_HARD_EXP; + if (mcs_dev->port_id != port_id) + return 0; break; case ROC_MCS_EVENT_TX_SA_PN_SOFT_EXP: d.type = RTE_ETH_EVENT_MACSEC_TX_SA_PN_SOFT_EXP; + if (mcs_dev->port_id != port_id) + return 0; break; default: plt_err("Unknown MACsec event type: %d", desc->type); diff --git a/drivers/net/cnxk/cnxk_ethdev_mtr.c b/drivers/net/cnxk/cnxk_ethdev_mtr.c index 27a6e4ef3dd..edeca6dcc38 100644 --- a/drivers/net/cnxk/cnxk_ethdev_mtr.c +++ b/drivers/net/cnxk/cnxk_ethdev_mtr.c @@ -613,6 +613,11 @@ cnxk_nix_mtr_destroy(struct rte_eth_dev *eth_dev, uint32_t mtr_id, while ((mtr->prev_cnt) + 1) { mid_mtr = nix_mtr_find(dev, mtr->prev_id[mtr->prev_cnt]); + if (mid_mtr == NULL) { + return -rte_mtr_error_set(error, ENOENT, + RTE_MTR_ERROR_TYPE_MTR_ID, &mtr->prev_id[mtr->prev_cnt], + "Mid meter id is invalid."); + } rc = roc_nix_bpf_connect(nix, ROC_NIX_BPF_LEVEL_F_LEAF, mid_mtr->bpf_id, ROC_NIX_BPF_ID_INVALID); @@ -628,6 +633,11 @@ cnxk_nix_mtr_destroy(struct rte_eth_dev *eth_dev, uint32_t mtr_id, while (mtr->prev_cnt) { top_mtr = nix_mtr_find(dev, mtr->prev_id[mtr->prev_cnt]); + if (top_mtr == NULL) { + return -rte_mtr_error_set(error, ENOENT, + RTE_MTR_ERROR_TYPE_MTR_ID, &mtr->prev_id[mtr->prev_cnt], + "Top meter id is invalid."); + } rc = roc_nix_bpf_connect(nix, ROC_NIX_BPF_LEVEL_F_MID, top_mtr->bpf_id, ROC_NIX_BPF_ID_INVALID); @@ -1590,6 +1600,8 @@ nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id, switch (*tree_level) { case 0: mtr = nix_get_mtr(eth_dev, cur_mtr_id); + if (mtr == NULL) + return -EINVAL; if 
(mtr->level == ROC_NIX_BPF_LEVEL_IDX_INVALID) { nix_mtr_level_update(eth_dev, cur_mtr_id, 0); nix_mtr_chain_update(eth_dev, cur_mtr_id, -1, @@ -1605,6 +1617,8 @@ nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id, break; case 1: mtr = nix_get_mtr(eth_dev, cur_mtr_id); + if (mtr == NULL) + return -EINVAL; if (mtr->level == ROC_NIX_BPF_LEVEL_IDX_INVALID) { nix_mtr_level_update(eth_dev, cur_mtr_id, 1); prev_mtr_id = id; @@ -1635,6 +1649,8 @@ nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id, switch (*tree_level) { case 0: mtr = nix_get_mtr(eth_dev, cur_mtr_id); + if (mtr == NULL) + return -EINVAL; if (mtr->level == ROC_NIX_BPF_LEVEL_IDX_INVALID) { nix_mtr_level_update(eth_dev, cur_mtr_id, 0); } else { @@ -1646,6 +1662,8 @@ nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id, break; case 1: mtr = nix_get_mtr(eth_dev, cur_mtr_id); + if (mtr == NULL) + return -EINVAL; if (mtr->level == ROC_NIX_BPF_LEVEL_IDX_INVALID) { nix_mtr_level_update(eth_dev, cur_mtr_id, 1); prev_mtr_id = id; @@ -1666,6 +1684,8 @@ nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id, break; case 2: mtr = nix_get_mtr(eth_dev, cur_mtr_id); + if (mtr == NULL) + return -EINVAL; if (mtr->level == ROC_NIX_BPF_LEVEL_IDX_INVALID) { nix_mtr_level_update(eth_dev, cur_mtr_id, 2); prev_mtr_id = *prev_id; diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c index 3ade8eed362..5de2919047c 100644 --- a/drivers/net/cnxk/cnxk_ethdev_ops.c +++ b/drivers/net/cnxk/cnxk_ethdev_ops.c @@ -474,6 +474,8 @@ cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr, return rc; } + dev->dmac_idx_map[index] = rc; + /* Enable promiscuous mode at NIX level */ roc_nix_npc_promisc_ena_dis(nix, true); dev->dmac_filter_enable = true; @@ -490,7 +492,7 @@ cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index) struct roc_nix *nix = &dev->nix; int rc; - rc = roc_nix_mac_addr_del(nix, index); + rc = roc_nix_mac_addr_del(nix, dev->dmac_idx_map[index]); if (rc) plt_err("Failed to delete mac address, rc=%d", rc); diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c index dc17c128def..b02dac49527 100644 --- a/drivers/net/cnxk/cnxk_ethdev_sec.c +++ b/drivers/net/cnxk/cnxk_ethdev_sec.c @@ -36,7 +36,7 @@ bitmap_ctzll(uint64_t slab) if (slab == 0) return 0; - return __builtin_ctzll(slab); + return rte_ctz64(slab); } int diff --git a/drivers/net/cnxk/cnxk_flow.c b/drivers/net/cnxk/cnxk_flow.c index 3b8348ae9c4..08ab75e2bbb 100644 --- a/drivers/net/cnxk/cnxk_flow.c +++ b/drivers/net/cnxk/cnxk_flow.c @@ -59,6 +59,8 @@ const struct cnxk_rte_flow_term_info term[] = { sizeof(struct rte_flow_item_raw)}, [RTE_FLOW_ITEM_TYPE_MARK] = {ROC_NPC_ITEM_TYPE_MARK, sizeof(struct rte_flow_item_mark)}, + [RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT] = {ROC_NPC_ITEM_TYPE_IPV6_ROUTING_EXT, + sizeof(struct rte_flow_item_ipv6_routing_ext)}, [RTE_FLOW_ITEM_TYPE_TX_QUEUE] = {ROC_NPC_ITEM_TYPE_TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)}}; @@ -230,6 +232,10 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, in_actions[i].type = ROC_NPC_ACTION_TYPE_METER; in_actions[i].conf = actions->conf; break; + case RTE_FLOW_ACTION_TYPE_AGE: + in_actions[i].type = ROC_NPC_ACTION_TYPE_AGE; + in_actions[i].conf = actions->conf; + break; default: plt_npc_dbg("Action is not supported = %d", actions->type); @@ -480,10 +486,51 @@ cnxk_flow_dev_dump(struct rte_eth_dev *eth_dev, struct rte_flow *flow, return 0; } 
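The flow-engine changes above map RTE_FLOW_ACTION_TYPE_AGE onto ROC_NPC_ACTION_TYPE_AGE and add the aging_poll_freq devarg (range 10-65535 per the updated parameter string, defaulting to ROC_NPC_AGE_POLL_FREQ_MIN), while the hunk that follows implements the get_aged_flows callback on top of the seqcount-protected aged-flows bitmap. A hedged sketch of the matching application-side usage; the port id, timeout, context cookie and the example devarg "-a 0002:02:00.0,aging_poll_freq=50" are illustrative placeholders.

#include <rte_common.h>
#include <rte_flow.h>

/* Attached to a rule at creation time; timeout is in seconds. */
static const struct rte_flow_action_age age_conf = {
	.timeout = 10,
	.context = NULL,	/* or an application cookie returned below */
};

/* Periodically collect rules whose AGE timeout has expired. */
static void
poll_aged_rules(uint16_t port_id)
{
	void *contexts[64];
	struct rte_flow_error err;
	int n, i;

	/* Returns the contexts registered via struct rte_flow_action_age. */
	n = rte_flow_get_aged_flows(port_id, contexts, RTE_DIM(contexts), &err);
	for (i = 0; i < n; i++) {
		/* e.g. look up the rule bound to contexts[i] and destroy it */
	}
}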
+static int +cnxk_flow_get_aged_flows(struct rte_eth_dev *eth_dev, void **context, + uint32_t nb_contexts, struct rte_flow_error *err) +{ + struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); + struct roc_npc *roc_npc = &dev->npc; + struct roc_npc_flow_age *flow_age; + uint32_t start_id; + uint32_t end_id; + int cnt = 0; + uint32_t sn; + uint32_t i; + + RTE_SET_USED(err); + + flow_age = &roc_npc->flow_age; + + do { + sn = plt_seqcount_read_begin(&flow_age->seq_cnt); + + if (nb_contexts == 0) { + cnt = flow_age->aged_flows_cnt; + } else { + start_id = flow_age->start_id; + end_id = flow_age->end_id; + for (i = start_id; i <= end_id; i++) { + if ((int)nb_contexts == cnt) + break; + if (plt_bitmap_get(flow_age->aged_flows, i)) { + context[cnt] = + roc_npc_aged_flow_ctx_get(roc_npc, i); + cnt++; + } + } + } + } while (plt_seqcount_read_retry(&flow_age->seq_cnt, sn)); + + return cnt; +} + struct rte_flow_ops cnxk_flow_ops = { .validate = cnxk_flow_validate, .flush = cnxk_flow_flush, .query = cnxk_flow_query, .isolate = cnxk_flow_isolate, .dev_dump = cnxk_flow_dev_dump, + .get_aged_flows = cnxk_flow_get_aged_flows, }; diff --git a/drivers/net/cnxk/cnxk_tm.c b/drivers/net/cnxk/cnxk_tm.c index 9d8cd3f0a9d..c799193cb89 100644 --- a/drivers/net/cnxk/cnxk_tm.c +++ b/drivers/net/cnxk/cnxk_tm.c @@ -765,6 +765,9 @@ cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev, if (queue_idx >= eth_dev->data->nb_tx_queues) goto exit; + if (roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_PFC) + return roc_nix_tm_pfc_rlimit_sq(nix, queue_idx, tx_rate); + if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_RLIMIT) && eth_dev->data->nb_tx_queues > 1) { /* diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h new file mode 100644 index 00000000000..7b82119e396 --- /dev/null +++ b/drivers/net/cpfl/cpfl_actions.h @@ -0,0 +1,858 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2023 Intel Corporation + */ + +#ifndef _CPFL_ACTIONS_H_ +#define _CPFL_ACTIONS_H_ + +#include "base/idpf_osdep.h" + +#pragma pack(1) + +union cpfl_action_set { + uint32_t data; + + struct { + uint32_t val : 24; + uint32_t idx : 4; + uint32_t tag : 1; + uint32_t prec : 3; + } set_24b_a; + + struct { + uint32_t val : 24; + uint32_t idx : 3; + uint32_t tag : 2; + uint32_t prec : 3; + } set_24b_b; + + struct { + uint32_t val : 16; + uint32_t idx : 4; + uint32_t unused : 6; + uint32_t tag : 3; + uint32_t prec : 3; + } set_16b; + + struct { + uint32_t val_a : 8; + uint32_t val_b : 8; + uint32_t idx_a : 4; + uint32_t idx_b : 4; + uint32_t tag : 5; + uint32_t prec : 3; + } set_8b; + + struct { + uint32_t val : 10; + uint32_t ena : 10; + uint32_t idx : 4; + uint32_t tag : 5; + uint32_t prec : 3; + } set_1b; + + struct { + uint32_t val : 24; + uint32_t tag : 5; + uint32_t prec : 3; + } nop; + + struct { + uint32_t val : 24; + uint32_t tag : 5; + uint32_t prec : 3; + } chained_24b; + + struct { + uint32_t val : 24; + uint32_t tag : 5; + uint32_t prec : 3; + } aux_flags; +}; + +struct cpfl_action_set_ext { +#define CPFL_ACTION_SET_EXT_CNT 2 + union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT]; +}; + +#pragma pack() + +/** + * cpfl_act_nop - Encode a NOP action + */ +static inline union cpfl_action_set +cpfl_act_nop(void) +{ + union cpfl_action_set act; + + act.data = 0; + return act; +} + +/** + * cpfl_is_nop_action - Indicate if an action set is a NOP + */ +static inline bool +cpfl_is_nop_action(union cpfl_action_set *act) +{ + return act->data == cpfl_act_nop().data; +} + +#define CPFL_MAKE_MASK32(b, s) 
((((uint32_t)1 << (b)) - 1) << (s)) + +#define CPFL_ACT_PREC_MAX 7 +#define CPFL_ACT_PREC_S 29 +#define CPFL_ACT_PREC_M CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S) +#define CPFL_ACT_PREC_SET(p) \ + (((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M) +#define CPFL_ACT_PREC_CHECK(p) ((p) > 0 && (p) <= CPFL_ACT_PREC_MAX) + +#define CPFL_METADATA_ID_CNT 32 /* Max number of metadata IDs */ +#define CPFL_METADATA_STRUCT_MAX_SZ 128 /* Max metadata size per ID */ + +/******************************************************************************* + * 1-Bit Actions + ******************************************************************************/ +#define CPFL_ACT_1B_OP_S 24 +#define CPFL_ACT_1B_OP_M CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S) +#define CPFL_ACT_1B_OP ((uint32_t)(0x01) << CPFL_ACT_1B_OP_S) + +#define CPFL_ACT_1B_VAL_S 0 +#define CPFL_ACT_1B_VAL_M CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S) +#define CPFL_ACT_1B_EN_S 10 +#define CPFL_ACT_1B_EN_M CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S) +#define CPFL_ACT_1B_INDEX_S 20 +#define CPFL_ACT_1B_INDEX_M CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S) + +/* 1-bit actions currently uses only INDEX of 0 */ +#define CPFL_ACT_MAKE_1B(prec, en, val) \ + ((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \ + ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \ + (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \ + (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M)) + +enum cpfl_act_1b_op { + CPFL_ACT_1B_OP_DROP = 0x01, + CPFL_ACT_1B_OP_HDR_SPLIT = 0x02, + CPFL_ACT_1B_OP_DIR_CHANGE = 0x04, + CPFL_ACT_1B_OP_DEFER_DROP = 0x08, + CPFL_ACT_1B_OP_ORIG_MIR_MD = 0x80 +}; + +#define CPFL_ACT_1B_COMMIT_MODE_S 4 +#define CPFL_ACT_1B_COMMIT_MODE_M \ + CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S) + +/** + * cpfl_act_commit_mode - action commit mode for certain action classes + */ +enum cpfl_act_commit_mode { + /* Action processing for the initial classification pass */ + CPFL_ACT_COMMIT_ALL = 0, /* Commit all actions */ + CPFL_ACT_COMMIT_PRE_MOD = 1, /* Commit only pre-modify actions*/ + CPFL_ACT_COMMIT_NONE = 2, /* Commit no action */ + /* Action processing for deferred actions in a recirculation pass */ + CPFL_ACT_COMMIT_RECIR_ALL = 4, /* Commit all actions */ + CPFL_ACT_COMMIT_RECIR_PRE_MOD = 5, /* Commit only pre-modify actions*/ + CPFL_ACT_COMMIT_RECIR_NONE = 6 /* Commit no action */ +}; + +/******************************************************************************* + * 8-Bit Actions + ******************************************************************************/ +#define CPFL_ACT_OP_8B_S 24 +#define CPFL_ACT_OP_8B_M CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S) +#define CPFL_ACT_OP_8B ((uint32_t)(0x02) << CPFL_ACT_OP_8B_S) + +#define CPFL_ACT_8B_A_VAL_S 0 +#define CPFL_ACT_8B_A_VAL_M CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S) +#define CPFL_ACT_8B_A_INDEX_S 16 +#define CPFL_ACT_8B_A_INDEX_M CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S) + +#define CPFL_ACT_8B_B_VAL_S 8 +#define CPFL_ACT_8B_B_VAL_M CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S) +#define CPFL_ACT_8B_B_INDEX_S 20 +#define CPFL_ACT_8B_B_INDEX_M CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S) + +/* Unless combining two 8-bit actions into an action set, both A and B fields + * must be the same, + */ +#define CPFL_ACT_MAKE_8B(prec, idx, val) \ + ((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \ + (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \ + (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \ + (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \ + (((val) << 
CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M)) + +/* 8-Bit Action Indices */ +#define CPFL_ACT_8B_INDEX_MOD_META 9 + +/* 8-Bit Action Miscellaneous */ +#define CPFL_ACT_8B_MOD_META_PROF_CNT 16 +#define CPFL_ACT_8B_MOD_META_VALID 0x80 + +/******************************************************************************* + * 16-Bit Actions + ******************************************************************************/ +#define CPFL_ACT_OP_16B_S 26 +#define CPFL_ACT_OP_16B_M CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S) +#define CPFL_ACT_OP_16B ((uint32_t)0x1 << CPFL_ACT_OP_16B_S) + +#define CPFL_ACT_16B_INDEX_S 16 +#define CPFL_ACT_16B_INDEX_M CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S) +#define CPFL_ACT_16B_VAL_S 0 +#define CPFL_ACT_16B_VAL_M CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S) + +#define CPFL_ACT_MAKE_16B(prec, idx, val) \ + ((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \ + (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \ + (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M)) + +/* 16-Bit Action Indices */ +#define CPFL_ACT_16B_INDEX_COUNT_SET 0 +#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX 1 +#define CPFL_ACT_16B_INDEX_SET_VSI 2 +#define CPFL_ACT_16B_INDEX_DEL_MD 4 +#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST 5 + +/* 16-Bit Action Miscellaneous */ +#define CPFL_ACT_16B_COUNT_SET_CNT 2048 /* TODO: Value from NSL */ +#define CPFL_ACT_16B_SET_VSI_SLOTS 2 +#define CPFL_ACT_16B_FWD_VSI_CNT 1032 /* TODO: Value from NSL */ +#define CPFL_ACT_16B_FWD_VSI_LIST_CNT 256 +#define CPFL_ACT_16B_MOD_VSI_LIST_CNT 1024 +#define CPFL_ACT_16B_FWD_PORT_CNT 4 +#define CPFL_ACT_16B_DEL_MD_MID_CNT 32 +#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS 4 + +/* 16-Bit SET_MCAST_IDX Action */ +#define CPFL_ACT_16B_SET_MCAST_VALID ((uint32_t)1 << 15) + +/* 16-Bit SET_VSI Action Variants */ +#define CPFL_ACT_16B_SET_VSI_VAL_S 0 +#define CPFL_ACT_16B_SET_VSI_VAL_M \ + CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S) +#define CPFL_ACT_16B_SET_VSI_PE_S 11 +#define CPFL_ACT_16B_SET_VSI_PE_M \ + CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S) +#define CPFL_ACT_16B_SET_VSI_TYPE_S 14 +#define CPFL_ACT_16B_SET_VSI_TYPE_M \ + CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S) + +/* 16-Bit DEL_MD Action */ +#define CPFL_ACT_16B_DEL_MD_0_S 0 +#define CPFL_ACT_16B_DEL_MD_1_S 5 + +/* 16-Bit MOD_VSI_LIST Actions */ +#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S 0 +#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M \ + CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S) +#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S 14 +#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M \ + CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S) +#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \ + ((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \ + CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \ + (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \ + CPFL_ACT_16B_MOD_VSI_LIST_ID_M)) + +#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \ + ((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \ + CPFL_ACT_16B_SET_VSI_TYPE_M) | \ + (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \ + CPFL_ACT_16B_SET_VSI_PE_M) | \ + (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \ + CPFL_ACT_16B_SET_VSI_VAL_M)) + +enum cpfl_prot_eng { + CPFL_PE_LAN = 0, + CPFL_PE_RDMA, + CPFL_PE_CRT +}; + +enum cpfl_act_fwd_type { + CPFL_ACT_FWD_VSI, + CPFL_ACT_FWD_VSI_LIST, + CPFL_ACT_FWD_PORT +}; + +/******************************************************************************* + * 24-Bit Actions + ******************************************************************************/ +/* Group A */ +#define 
CPFL_ACT_OP_24B_A_S 28 +#define CPFL_ACT_OP_24B_A_M CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S) +#define CPFL_ACT_24B_A_INDEX_S 24 +#define CPFL_ACT_24B_A_INDEX_M CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S) +#define CPFL_ACT_24B_A_VAL_S 0 +#define CPFL_ACT_24B_A_VAL_M CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S) + +#define CPFL_ACT_OP_24B_A ((uint32_t)1 << CPFL_ACT_OP_24B_A_S) + +#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \ + ((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \ + (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \ + (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M)) + +#define CPFL_ACT_24B_INDEX_MOD_ADDR 0 +#define CPFL_ACT_24B_INDEX_MIRROR_FIRST 1 +#define CPFL_ACT_24B_INDEX_COUNT 2 +#define CPFL_ACT_24B_INDEX_SET_Q 8 +#define CPFL_ACT_24B_INDEX_MOD_PROFILE 9 +#define CPFL_ACT_24B_INDEX_METER 10 + +#define CPFL_ACT_24B_COUNT_SLOTS 6 +#define CPFL_ACT_24B_METER_SLOTS 6 + +#define CPFL_ACT_24B_MOD_ADDR_CNT (16 * 1024 * 1024) +#define CPFL_ACT_24B_COUNT_ID_CNT ((uint32_t)1 << 24) +#define CPFL_ACT_24B_SET_Q_CNT (12 * 1024) +#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS 3 + +/* 24-Bit SET_Q Action */ +#define CPFL_ACT_24B_SET_Q_Q_S 0 +#define CPFL_ACT_24B_SET_Q_Q_M \ + CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S) +#define CPFL_ACT_24B_SET_Q_Q_RGN_S 14 +#define CPFL_ACT_24B_SET_Q_Q_RGN_M \ + CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S) +#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS CPFL_MAKE_MASK32(1, 17) +#define CPFL_ACT_24B_SET_Q_DST_PE_S 21 +#define CPFL_ACT_24B_SET_Q_DST_PE_M \ + CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S) +#define CPFL_ACT_24B_SET_Q_VALID CPFL_MAKE_MASK32(1, 23) + +/* 24-Bit MOD_PROFILE Action */ +enum cpfl_act_mod_profile_hint { + CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */ + CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */ + CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */ +}; + +#define CPFL_ACT_24B_MOD_PROFILE_PROF_S 0 +#define CPFL_ACT_24B_MOD_PROFILE_PROF_M \ + CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S) +#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S 12 +#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M \ + CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) +#define CPFL_ACT_24B_MOD_PROFILE_HINT_S 14 +#define CPFL_ACT_24B_MOD_PROFILE_HINT_M \ + CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S) +#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS ((uint32_t)1 << 16) +#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND ((uint32_t)1 << 17) +#define CPFL_ACT_24B_MOD_PROFILE_VALID ((uint32_t)1 << 23) + +#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES 4 +#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT 2048 + +/* 24-Bit METER Actions */ +#define CPFL_ACT_24B_METER_INDEX_S 0 +#define CPFL_ACT_24B_METER_INDEX_M \ + CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S) +#define CPFL_ACT_24B_METER_BANK_S 20 +#define CPFL_ACT_24B_METER_BANK_M \ + CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S) +#define CPFL_ACT_24B_METER_VALID ((uint32_t)1 << 23) + +#define CPFL_ACT_24B_METER_BANK_CNT 6 +#define CPFL_ACT_24B_METER_INDEX_CNT ((uint32_t)1 << 20) + +/* Group B */ +#define CPFL_ACT_OP_24B_B_S 27 +#define CPFL_ACT_OP_24B_B_M CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S) +#define CPFL_ACT_24B_B_INDEX_S 24 +#define CPFL_ACT_24B_B_INDEX_M \ + CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S) +#define CPFL_ACT_24B_B_VAL_S 0 +#define CPFL_ACT_24B_B_VAL_M CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S) + +#define CPFL_ACT_OP_24B_B ((uint32_t)1 << CPFL_ACT_OP_24B_B_S) + +#define 
CPFL_ACT_MAKE_24B_B(prec, idx, val) \ + ((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \ + (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \ + (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M)) + +#define CPFL_ACT_24B_INDEX_SET_MD 0 +#define CPFL_ACT_24B_INDEX_RANGE_CHECK 6 +#define CPFL_ACT_24B_SET_MD_SLOTS 6 + +/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */ +/* 8-Bit SET_MD */ +#define CPFL_ACT_24B_SET_MD8_VAL_S 0 +#define CPFL_ACT_24B_SET_MD8_VAL_M \ + CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S) +#define CPFL_ACT_24B_SET_MD8_MASK_S 8 +#define CPFL_ACT_24B_SET_MD8_MASK_M \ + CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S) +#define CPFL_ACT_24B_SET_MD8_OFFSET_S 16 +#define CPFL_ACT_24B_SET_MD8_OFFSET_M \ + CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S) +#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S 20 +#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M \ + CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S) +/* 16-Bit SET_MD */ +#define CPFL_ACT_24B_SET_MD16_VAL_S 0 +#define CPFL_ACT_24B_SET_MD16_VAL_M \ + CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S) +#define CPFL_ACT_24B_SET_MD16_MASK_L_S 16 /* For chained action */ +#define CPFL_ACT_24B_SET_MD16_MASK_L_M \ + CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S) +#define CPFL_ACT_24B_SET_MD16_MASK_H_SR 8 +#define CPFL_ACT_24B_SET_MD16_MASK_H_M 0xff +#define CPFL_ACT_24B_SET_MD16_OFFSET_S 16 +#define CPFL_ACT_24B_SET_MD16_OFFSET_M \ + CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S) +#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S 20 +#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M \ + CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S) +#define CPFL_ACT_24B_SET_MD16 ((uint32_t)1 << 23) + +#define CPFL_ACT_24B_SET_MD32_VAL_L_M CPFL_MAKE_MASK32(24, 0) + +#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX 15 +#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX 7 +#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX 15 +#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX 7 + +/* RANGE_CHECK Action */ +enum cpfl_rule_act_rc_mode { + CPFL_RULE_ACT_RC_1_RANGE = 0, + CPFL_RULE_ACT_RC_2_RANGES = 1, + CPFL_RULE_ACT_RC_4_RANGES = 2, + CPFL_RULE_ACT_RC_8_RANGES = 3 +}; + +#define CPFL_ACT_24B_RC_TBL_IDX_S 0 +#define CPFL_ACT_24B_RC_TBL_IDX_M \ + CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S) +#define CPFL_ACT_24B_RC_START_BANK_S 13 +#define CPFL_ACT_24B_RC_START_BANK_M \ + CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S) +#define CPFL_ACT_24B_RC_MODE_S 16 +#define CPFL_ACT_24B_RC_MODE_M \ + CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S) +#define CPFL_ACT_24B_RC_XTRACT_PROF_S 18 +#define CPFL_ACT_24B_RC_XTRACT_PROF_M \ + CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S) + +#define CPFL_ACT_24B_RC_TBL_INDEX_CNT (8 * 1024) +#define CPFL_ACT_24B_RC_BANK_CNT 8 +#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT 64 + +/******************************************************************************* + * 24-Bit Chained Auxiliary Actions + ******************************************************************************/ + +/* TODO: HAS is being updated. Revise the order of chained and base action + * when the HAS has it finalized. + */ +/** + * 24-Bit Chained SET_MD Actions + * + * Chained SET_MD actions consume two consecutive action sets. The first one is + * the chained AUX action set. The second one is the base/parent action set. + * Chained SET_MD actions can add and/or update metadata structure with IDs from + * 0 to 31 while the non-chained SET_MD variants can only update existing meta- + * data IDs below 16. 
+ */ + +#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S 8 +#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M \ + CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) +#define CPFL_ACT_24B_SET_MD_AUX_ADD ((uint32_t)1 << 15) +#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S 16 +#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M \ + CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) +#define CPFL_ACT_24B_SET_MD_AUX_DATA_S 0 +#define CPFL_ACT_24B_SET_MD_AUX_DATA_M \ + CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S) + +#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S 0 +#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M \ + CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S) +#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR 24 /* Upper 8 bits of MD32 */ +#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M 0xff + +#define CPFL_ACT_TYPE_CHAIN_DATA_S 29 +#define CPFL_ACT_TYPE_CHAIN_DATA_M \ + CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S) +#define CPFL_ACT_TYPE_CHAIN_DATA ((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S) + +#define CPFL_ACT_24B_SET_MD_OP_S 21 +#define CPFL_ACT_24B_SET_MD_OP_8B ((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S) +#define CPFL_ACT_24B_SET_MD_OP_16B ((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S) +#define CPFL_ACT_24B_SET_MD_OP_32B ((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S) + +#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \ + (CPFL_ACT_TYPE_CHAIN_DATA | (op) | \ + (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \ + CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \ + (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \ + CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \ + (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \ + CPFL_ACT_24B_SET_MD_AUX_DATA_M)) + +/******************************************************************************* + * 1-Bit Action Factory + ******************************************************************************/ + +/** + * cpfl_act_drop - Encode a 1-bit DROP action + * + * The DROP action has precedence over the DEFER_DOP action. + * Affect of ACT_COMMIT action on the DROP action: + * - CPFL_ACT_COMMIT_ALL: Packet is dropped. + * - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped. + * - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped. Recirculation is canceled. + * - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not + * dropped. Recirculation continues. + * + * Once a DROP action is set, it cannot be reverted during the classification + * process of a network packet. + */ +static inline union cpfl_action_set +cpfl_act_drop(uint8_t prec) +{ + union cpfl_action_set a; + + if (!CPFL_ACT_PREC_CHECK(prec)) + return cpfl_act_nop(); + a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1); + return a; +} + +/** + * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action + * An ACT_COMMIT action specifies if and when all actions are committed. 
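Tying the 1-bit actions together, a sketch (not from the patch; precedence 3 and the commit mode are example choices) of an unconditional drop; per the description above, committing with CPFL_ACT_COMMIT_ALL is what lets the DROP take effect:

	union cpfl_action_set acts[2];

	acts[0] = cpfl_act_drop(3);
	acts[1] = cpfl_act_set_commit_mode(3, CPFL_ACT_COMMIT_ALL);
	/* either factory returns the NOP encoding if its precedence is invalid */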
+ */ +static inline union cpfl_action_set +cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode) +{ + union cpfl_action_set a; + + if (!CPFL_ACT_PREC_CHECK(prec)) + return cpfl_act_nop(); + a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M, + (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S); + return a; +} + +/******************************************************************************* + * 8-Bit Action Factory + ******************************************************************************/ + +/** + * cpfl_act_mod_meta - Encode an 8-bit MOD_META action + */ +static inline union cpfl_action_set +cpfl_act_mod_meta(uint8_t prec, uint8_t prof) +{ + union cpfl_action_set a; + + if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT) + return cpfl_act_nop(); + + a.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META, + CPFL_ACT_8B_MOD_META_VALID | prof); + + return a; +} + +/******************************************************************************* + * 16-Bit Action Factory + ******************************************************************************/ + +/** + * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI) + * + * This encodes the "Forward to Single VSI" variant of SET_VSI action. + * SEM can use both SET_VSI action slots. The other classification blocks can + * only use slot 0. + */ +static inline union cpfl_action_set +cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi) +{ + union cpfl_action_set a; + uint32_t val; + + if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS || + vsi >= CPFL_ACT_16B_FWD_VSI_CNT) + return cpfl_act_nop(); + + val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi); + a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot, + val); + + return a; +} + +/** + * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port) + * + * This encodes the "Forward to a port" variant of SET_VSI action. + * SEM can use both SET_VSI action slots. The other classification blocks can + * only use slot 0. + */ +static inline union cpfl_action_set +cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port) +{ + union cpfl_action_set a; + uint32_t val; + + if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS || + port >= CPFL_ACT_16B_FWD_PORT_CNT) + return cpfl_act_nop(); + + val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT, pe, port); + a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot, + val); + + return a; +} + +/******************************************************************************* + * 24-Bit Action Factory + ******************************************************************************/ + +/** + * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action + * + * This MOD_ADDR specifies the index of the MOD content entry an accompanying + * MOD_PROFILE action uses. Some MOD_PROFILE actions may need to use extra + * information from a Modify content entry, and requires an accompanying + * MOD_ADDR action. 
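A usage sketch for the 16-bit SET_VSI factory above; slot 0 and precedence 2 are arbitrary example values, and the destination engine is simply passed through:

	static inline union cpfl_action_set
	cpfl_example_fwd_to_vsi(enum cpfl_prot_eng pe, uint16_t vsi)
	{
		/* slot 0 is usable by every classification block;
		 * only SEM may also use slot 1
		 */
		return cpfl_act_fwd_vsi(0, 2, pe, vsi);
	}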
+ */ +static inline union cpfl_action_set +cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr) +{ + union cpfl_action_set a; + + if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT) + return cpfl_act_nop(); + + a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR, + mod_addr); + + return a; +} + +/** + * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant) + * + * This action is a "Forward to a single queue" variant of the SET_Q action. + * + * SEM performs Implicit VSI for SET_Q action when "no_impliciti_vsi" is false. + * WCM and LEM never perform Implicit VSI for SET_Q actions. + */ +static inline union cpfl_action_set +cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q, + bool no_implicit_vsi) +{ + union cpfl_action_set a; + uint32_t val; + + if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT) + return cpfl_act_nop(); + + val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q | + (((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) & + CPFL_ACT_24B_SET_Q_DST_PE_M); + if (no_implicit_vsi) + val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS; + a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val); + + return a; +} + +/** + * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region) + * + * This action is a "Forward to a queue region" variant of the SET_Q action. + * + * SEM performs Implicit VSI for SET_Q action when "no_impliciti_vsi" is false. + * WCM and LEM never perform Implicit VSI for SET_Q actions. + */ +static inline union cpfl_action_set +cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base, + uint8_t q_rgn_bits, bool no_implicit_vsi) +{ + union cpfl_action_set a; + uint32_t val; + + if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT || + q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS) + return cpfl_act_nop(); + + val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base | + ((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) | + (((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) & + CPFL_ACT_24B_SET_Q_DST_PE_M); + if (no_implicit_vsi) + val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS; + a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val); + + return a; +} + +/** + * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action + * + * This action specifies a Modify profile to use for modifying the network + * packet being classified. In addition, it also provides a hint to whether + * or not an accompanied MOD_ADDR action is expected and should be prefetched. + * + * There is only one MOD_PROFILE action slot. If multiple classification blocks + * emit this action, the precedence value and auxiliary precedence value will be + * used to select one with higher precedence. 
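A sketch of forwarding to a queue region with the factory above; precedence 2, a region of 8 queues and implicit VSI left enabled are example choices:

	static inline union cpfl_action_set
	cpfl_example_fwd_to_8_queues(enum cpfl_prot_eng pe, uint16_t q_base)
	{
		/* q_rgn_bits = 3 selects a region of 2^3 = 8 queues
		 * starting at q_base
		 */
		return cpfl_act_set_hash_queue_region(2, pe, q_base, 3, false);
	}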
+ */ +static inline union cpfl_action_set +cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus, + bool miss_prepend, enum cpfl_act_mod_profile_hint hint) +{ + union cpfl_action_set a; + uint32_t val; + + if (!CPFL_ACT_PREC_CHECK(prec) || + prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT || + ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES) + return cpfl_act_nop(); + + val = CPFL_ACT_24B_MOD_PROFILE_VALID | + (((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) & + CPFL_ACT_24B_MOD_PROFILE_HINT_M) | + (((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) & + CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) | + ((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S); + if (append_act_bus) + val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS; + if (miss_prepend) + val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND; + + a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val); + + return a; +} + +/** + * cpfl_act_meter - Encode a 24-bit METER action + * + * Return NOP if any given input parameter is invalid. + * + * A bank can only be used by one of the METER action slots. If multiple METER + * actions select the same bank, the action with the highest action slot wins. + * In Policer mode, METER actions at the higher indexes have precedence over + * ones at lower indexes. + */ +static inline union cpfl_action_set +cpfl_act_meter(uint8_t slot, uint8_t prec, uint32_t idx, uint8_t bank) +{ + union cpfl_action_set a; + uint32_t val; + + if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS || + idx >= CPFL_ACT_24B_METER_INDEX_CNT || + bank >= CPFL_ACT_24B_METER_BANK_CNT) + return cpfl_act_nop(); + + val = CPFL_ACT_24B_METER_VALID | + (uint32_t)idx << CPFL_ACT_24B_METER_INDEX_S | + (uint32_t)bank << CPFL_ACT_24B_METER_BANK_S; + a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot, + val); + + return a; +} + +/** + * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot + * + * This SET_MD action sets/updates a byte of a given metadata ID structure + * using one of the SET_MD action slots. This action variant can only set + * one the first 16 bytes of any of the first 7 metadata types. + */ +static inline union cpfl_action_set +cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask) +{ + union cpfl_action_set a; + uint32_t tmp; + + if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS || + mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX || + off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX) + return cpfl_act_nop(); + + tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) | + ((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) | + ((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) | + ((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S); + a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot, + tmp); + + return a; +} + +/** + * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot + * + * This SET_MD action sets/updates a word of a given metadata ID structure + * using one of the SET_MD action slots. This action variant can only set + * one the first 16 words of any of the first 7 metadata types. 
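A sketch of pairing MOD_PROFILE with MOD_ADDR as described above; precedence 2, the prefetch hint and the cleared flags are example choices:

	static inline void
	cpfl_example_mod_with_prefetch(union cpfl_action_set acts[2],
				       uint16_t mod_prof, uint32_t mod_entry)
	{
		/* the profile expects 128B of MOD content, so request a
		 * prefetch and supply the content index through MOD_ADDR
		 */
		acts[0] = cpfl_act_mod_profile(2, mod_prof, 0, false, false,
					       CPFL_ACT_MOD_PROFILE_PREFETCH_128B);
		acts[1] = cpfl_act_mod_addr(2, mod_entry);
	}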
+ */ +static inline union cpfl_action_set +cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val) +{ + union cpfl_action_set a; + uint32_t tmp; + + if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS || + mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX || + word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX) + return cpfl_act_nop(); + + tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) | + ((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) | + ((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) | + ((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S); + a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot, + tmp); + + return a; +} + +/** + * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot + * + * This SET_MD action sets/updates a dword of a given metadata ID structure + * using one of the SET_MD action slots. This action is made up of 2 chained + * action sets. The chained action set is the first. The base/parent action + * sets is the second. + */ +static inline void +cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid, + uint8_t off, uint32_t val) +{ + if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) || + mid >= CPFL_METADATA_ID_CNT || + (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) { + ext->acts[0] = cpfl_act_nop(); + ext->acts[1] = cpfl_act_nop(); + } else { + uint32_t tmp; + + /* Chained action set comes first */ + tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR; + ext->acts[0].data = + CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B, + mid, off, tmp); + + /* Lower 24 bits of value */ + tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M; + ext->acts[1].data = + CPFL_ACT_MAKE_24B_B(prec, + CPFL_ACT_24B_INDEX_SET_MD + slot, + tmp); + } +} + +#endif /* _CPFL_ACTIONS_H_ */ diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c new file mode 100644 index 00000000000..4a925bc3380 --- /dev/null +++ b/drivers/net/cpfl/cpfl_controlq.c @@ -0,0 +1,801 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2023 Intel Corporation + */ + +#include "cpfl_controlq.h" +#include "base/idpf_controlq.h" +#include "rte_common.h" + +/** + * cpfl_check_dma_mem_parameters - verify DMA memory params from CP + * @qinfo: pointer to create control queue info struct + * + * Verify that DMA parameter of each DMA memory struct is present and + * consistent with control queue parameters + */ +static inline int +cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo) +{ + struct idpf_dma_mem *ring = &qinfo->ring_mem; + struct idpf_dma_mem *buf = &qinfo->buf_mem; + + if (!ring->va || !ring->size) + return -EINVAL; + + if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc)) + return -EINVAL; + + /* no need for buffer checks for TX queues */ + if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX || + qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX || + qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX) + return 0; + + if (!buf->va || !buf->size) + return -EINVAL; + + /* accommodate different types of rx ring buffer sizes */ + if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX && + buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) || + (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX && + buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE)) + return -EINVAL; + + return 0; +} + +/** + * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs + * @hw: pointer to hw struct + * @cq: pointer to control queue struct + * @qinfo: pointer to create 
queue info struct + * + * The CP takes care of all DMA memory allocations. Store the allocated memory + * information for the descriptor ring and buffers. If the memory for either the + * descriptor ring or the buffers is not allocated properly and/or inconsistent + * with the control queue parameters, this routine will free the memory for + * both the descriptors and the buffers + */ +int +cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq, + struct cpfl_ctlq_create_info *qinfo) +{ + int ret_code = 0; + unsigned int elem_size; + int i = 0; + + ret_code = cpfl_check_dma_mem_parameters(qinfo); + if (ret_code) + /* TODO: Log an error message per CP */ + goto err; + + cq->desc_ring.va = qinfo->ring_mem.va; + cq->desc_ring.pa = qinfo->ring_mem.pa; + cq->desc_ring.size = qinfo->ring_mem.size; + + switch (cq->cq_type) { + case IDPF_CTLQ_TYPE_MAILBOX_RX: + case IDPF_CTLQ_TYPE_CONFIG_RX: + case IDPF_CTLQ_TYPE_EVENT_RX: + case IDPF_CTLQ_TYPE_RDMA_RX: + /* Only receive queues will have allocated buffers + * during init. CP allocates one big chunk of DMA + * region who size is equal to ring_len * buff_size. + * In CPFLib, the block gets broken down to multiple + * smaller blocks that actually gets programmed in the hardware. + */ + + cq->bi.rx_buff = (struct idpf_dma_mem **) + idpf_calloc(hw, cq->ring_size, + sizeof(struct idpf_dma_mem *)); + if (!cq->bi.rx_buff) { + ret_code = -ENOMEM; + /* TODO: Log an error message per CP */ + goto err; + } + + elem_size = qinfo->buf_size; + for (i = 0; i < cq->ring_size; i++) { + cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc + (hw, 1, + sizeof(struct idpf_dma_mem)); + if (!cq->bi.rx_buff[i]) { + ret_code = -ENOMEM; + goto free_rx_buffs; + } + cq->bi.rx_buff[i]->va = + (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size)); + cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa + + (i * elem_size); + cq->bi.rx_buff[i]->size = elem_size; + } + break; + case IDPF_CTLQ_TYPE_MAILBOX_TX: + case IDPF_CTLQ_TYPE_CONFIG_TX: + case IDPF_CTLQ_TYPE_RDMA_TX: + case IDPF_CTLQ_TYPE_RDMA_COMPL: + break; + default: + ret_code = -EINVAL; + } + + return ret_code; + +free_rx_buffs: + i--; + for (; i >= 0; i--) + idpf_free(hw, cq->bi.rx_buff[i]); + + if (!cq->bi.rx_buff) + idpf_free(hw, cq->bi.rx_buff); + +err: + return ret_code; +} + +/** + * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf + * @cq: pointer to the specific Control queue + * + * Record the address of the receive queue DMA buffers in the descriptors. + * The buffers must have been previously allocated. 
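For reference, a sketch of the sizes cpfl_check_dma_mem_parameters() accepts for a default mailbox RX queue; the CP performs the actual allocation, this only spells out the arithmetic (the constants come from cpfl_controlq.h):

	qinfo.len           = CPFL_DFLT_MBX_RING_LEN;
	qinfo.buf_size      = CPFL_CTLQ_MAILBOX_BUFFER_SIZE;
	qinfo.ring_mem.size = qinfo.len * sizeof(struct idpf_ctlq_desc);
	qinfo.buf_mem.size  = qinfo.len * CPFL_CTLQ_MAILBOX_BUFFER_SIZE;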
+ */ +static void +cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq) +{ + int i = 0; + + for (i = 0; i < cq->ring_size; i++) { + struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i); + struct idpf_dma_mem *bi = cq->bi.rx_buff[i]; + + /* No buffer to post to descriptor, continue */ + if (!bi) + continue; + + desc->flags = + CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD); + desc->opcode = 0; + desc->datalen = CPU_TO_LE16(bi->size); + desc->ret_val = 0; + desc->cookie_high = 0; + desc->cookie_low = 0; + desc->params.indirect.addr_high = + CPU_TO_LE32(IDPF_HI_DWORD(bi->pa)); + desc->params.indirect.addr_low = + CPU_TO_LE32(IDPF_LO_DWORD(bi->pa)); + desc->params.indirect.param0 = 0; + desc->params.indirect.param1 = 0; + } +} + +/** + * cpfl_ctlq_setup_regs - initialize control queue registers + * @cq: pointer to the specific control queue + * @q_create_info: structs containing info for each queue to be initialized + */ +static void +cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info) +{ + /* set control queue registers in our local struct */ + cq->reg.head = q_create_info->reg.head; + cq->reg.tail = q_create_info->reg.tail; + cq->reg.len = q_create_info->reg.len; + cq->reg.bah = q_create_info->reg.bah; + cq->reg.bal = q_create_info->reg.bal; + cq->reg.len_mask = q_create_info->reg.len_mask; + cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask; + cq->reg.head_mask = q_create_info->reg.head_mask; +} + +/** + * cpfl_ctlq_init_regs - Initialize control queue registers + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + * @is_rxq: true if receive control queue, false otherwise + * + * Initialize registers. The caller is expected to have already initialized the + * descriptor ring memory and buffer memory + */ +static void +cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq) +{ + /* Update tail to post pre-allocated buffers for rx queues */ + if (is_rxq) + wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1)); + + /* For non-Mailbox control queues only TAIL need to be set */ + if (cq->q_id != -1) + return; + + /* Clear Head for both send or receive */ + wr32(hw, cq->reg.head, 0); + + /* set starting point */ + wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa)); + wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa)); + wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask)); +} + +/** + * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure + * @hw: context info for the callback + * @cq: pointer to the specific control queue + * + * DMA buffers are released by the CP itself + */ +static void +cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq) +{ + int i; + + if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX || + cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) { + for (i = 0; i < cq->ring_size; i++) + idpf_free(hw, cq->bi.rx_buff[i]); + /* free the buffer header */ + idpf_free(hw, cq->bi.rx_buff); + } else { + idpf_free(hw, cq->bi.tx_msg); + } +} + +/** + * cpfl_ctlq_add - add one control queue + * @hw: pointer to hardware struct + * @qinfo: info for queue to be created + * @cq_out: (output) double pointer to control queue to be created + * + * Allocate and initialize a control queue and add it to the control queue list. + * The cq parameter will be allocated/initialized and passed back to the caller + * if no errors occur. 
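A sketch of the create-info block cpfl_ctlq_add() consumes for the default mailbox; the register offsets and the CP-provided DMA memory are omitted since they are device and deployment specific:

	struct cpfl_ctlq_create_info qinfo = {
		.type     = IDPF_CTLQ_TYPE_MAILBOX_TX,
		.id       = -1,	/* -1 selects the default mailbox */
		.len      = CPFL_DFLT_MBX_RING_LEN,
		.buf_size = CPFL_CTLQ_MAILBOX_BUFFER_SIZE,
	};
	/* qinfo.reg.* come from the device register map and
	 * qinfo.ring_mem / qinfo.buf_mem from the CP
	 */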
+ */ +int +cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo, + struct idpf_ctlq_info **cq_out) +{ + struct idpf_ctlq_info *cq; + bool is_rxq = false; + int status = 0; + + if (!qinfo->len || !qinfo->buf_size || + qinfo->len > IDPF_CTLQ_MAX_RING_SIZE || + qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN) + return -EINVAL; + + cq = (struct idpf_ctlq_info *) + idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info)); + + if (!cq) + return -ENOMEM; + + cq->cq_type = qinfo->type; + cq->q_id = qinfo->id; + cq->buf_size = qinfo->buf_size; + cq->ring_size = qinfo->len; + + cq->next_to_use = 0; + cq->next_to_clean = 0; + cq->next_to_post = cq->ring_size - 1; + + switch (qinfo->type) { + case IDPF_CTLQ_TYPE_EVENT_RX: + case IDPF_CTLQ_TYPE_CONFIG_RX: + case IDPF_CTLQ_TYPE_MAILBOX_RX: + is_rxq = true; + /* fallthrough */ + case IDPF_CTLQ_TYPE_CONFIG_TX: + case IDPF_CTLQ_TYPE_MAILBOX_TX: + status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo); + break; + + default: + status = -EINVAL; + break; + } + + if (status) + goto init_free_q; + + if (is_rxq) { + cpfl_ctlq_init_rxq_bufs(cq); + } else { + /* Allocate the array of msg pointers for TX queues */ + cq->bi.tx_msg = (struct idpf_ctlq_msg **) + idpf_calloc(hw, qinfo->len, + sizeof(struct idpf_ctlq_msg *)); + if (!cq->bi.tx_msg) { + status = -ENOMEM; + goto init_dealloc_q_mem; + } + } + + cpfl_ctlq_setup_regs(cq, qinfo); + + cpfl_ctlq_init_regs(hw, cq, is_rxq); + + idpf_init_lock(&cq->cq_lock); + + LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list); + + *cq_out = cq; + return status; + +init_dealloc_q_mem: + /* free ring buffers and the ring itself */ + cpfl_ctlq_dealloc_ring_res(hw, cq); +init_free_q: + idpf_free(hw, cq); + cq = NULL; + + return status; +} + +/** + * cpfl_ctlq_send - send command to Control Queue (CTQ) + * @hw: pointer to hw struct + * @cq: handle to control queue struct to send on + * @num_q_msg: number of messages to send on control queue + * @q_msg: pointer to array of queue messages to be sent + * + * The caller is expected to allocate DMAable buffers and pass them to the + * send routine via the q_msg struct / control queue specific data struct. + * The control queue will hold a reference to each send message until + * the completion for that message has been cleaned. 
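Continuing the create-info sketch, creating the queue itself (hw is assumed to be an already initialized struct idpf_hw):

	struct idpf_ctlq_info *cq = NULL;
	int err;

	err = cpfl_ctlq_add(hw, &qinfo, &cq);
	/* cq is only valid when cpfl_ctlq_add() returns 0 */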
+ */ +int +cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]) +{ + struct idpf_ctlq_desc *desc; + int num_desc_avail = 0; + int status = 0; + int i = 0; + + if (!cq || !cq->ring_size) + return -ENOBUFS; + + idpf_acquire_lock(&cq->cq_lock); + + /* Ensure there are enough descriptors to send all messages */ + num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq); + if (num_desc_avail == 0 || num_desc_avail < num_q_msg) { + status = -ENOSPC; + goto sq_send_command_out; + } + + for (i = 0; i < num_q_msg; i++) { + struct idpf_ctlq_msg *msg = &q_msg[i]; + + desc = IDPF_CTLQ_DESC(cq, cq->next_to_use); + desc->opcode = CPU_TO_LE16(msg->opcode); + desc->pfid_vfid = CPU_TO_LE16(msg->func_id); + desc->cookie_high = + CPU_TO_LE32(msg->cookie.mbx.chnl_opcode); + desc->cookie_low = + CPU_TO_LE32(msg->cookie.mbx.chnl_retval); + desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) << + IDPF_CTLQ_FLAG_HOST_ID_S); + if (msg->data_len) { + struct idpf_dma_mem *buff = msg->ctx.indirect.payload; + + desc->datalen |= CPU_TO_LE16(msg->data_len); + desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF); + desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD); + /* Update the address values in the desc with the pa + * value for respective buffer + */ + desc->params.indirect.addr_high = + CPU_TO_LE32(IDPF_HI_DWORD(buff->pa)); + desc->params.indirect.addr_low = + CPU_TO_LE32(IDPF_LO_DWORD(buff->pa)); + idpf_memcpy(&desc->params, msg->ctx.indirect.context, + IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA); + } else { + idpf_memcpy(&desc->params, msg->ctx.direct, + IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA); + } + + /* Store buffer info */ + cq->bi.tx_msg[cq->next_to_use] = msg; + (cq->next_to_use)++; + if (cq->next_to_use == cq->ring_size) + cq->next_to_use = 0; + } + + /* Force memory write to complete before letting hardware + * know that there are new descriptors to fetch. + */ + idpf_wmb(); + wr32(hw, cq->reg.tail, cq->next_to_use); + +sq_send_command_out: + idpf_release_lock(&cq->cq_lock); + + return status; +} + +/** + * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write + * back for the requested queue + * @cq: pointer to the specific Control queue + * @clean_count: (input|output) number of descriptors to clean as input, and + * number of descriptors actually cleaned as output + * @msg_status: (output) pointer to msg pointer array to be populated; needs + * to be allocated by caller + * @force: (input) clean descriptors which were not done yet. Use with caution + * in kernel mode only + * + * Returns an array of message pointers associated with the cleaned + * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned + * descriptors. The status will be returned for each; any messages that failed + * to send will have a non-zero status. The caller is expected to free original + * ctlq_msgs and free or reuse the DMA buffers. 
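A sketch of posting one direct (no payload) message and later reclaiming its descriptor; the opcode is a made-up value:

	struct idpf_ctlq_msg msg = { 0 };
	struct idpf_ctlq_msg *done[1];
	uint16_t nb_done = 1;
	int err;

	msg.opcode = 0x123;	/* data_len stays 0, so the direct context is used */
	err = cpfl_ctlq_send(hw, cq, 1, &msg);

	/* later, once hardware has written the descriptor back */
	err = cpfl_ctlq_clean_sq(cq, &nb_done, done);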
+ */ +static int +__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count, + struct idpf_ctlq_msg *msg_status[], bool force) +{ + struct idpf_ctlq_desc *desc; + uint16_t i = 0, num_to_clean; + uint16_t ntc, desc_err; + int ret = 0; + + if (!cq || !cq->ring_size) + return -ENOBUFS; + + if (*clean_count == 0) + return 0; + if (*clean_count > cq->ring_size) + return -EINVAL; + + idpf_acquire_lock(&cq->cq_lock); + ntc = cq->next_to_clean; + num_to_clean = *clean_count; + + for (i = 0; i < num_to_clean; i++) { + /* Fetch next descriptor and check if marked as done */ + desc = IDPF_CTLQ_DESC(cq, ntc); + if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD)) + break; + + desc_err = LE16_TO_CPU(desc->ret_val); + if (desc_err) { + /* strip off FW internal code */ + desc_err &= 0xff; + } + + msg_status[i] = cq->bi.tx_msg[ntc]; + if (!msg_status[i]) + break; + msg_status[i]->status = desc_err; + cq->bi.tx_msg[ntc] = NULL; + /* Zero out any stale data */ + idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM); + ntc++; + if (ntc == cq->ring_size) + ntc = 0; + } + + cq->next_to_clean = ntc; + idpf_release_lock(&cq->cq_lock); + + /* Return number of descriptors actually cleaned */ + *clean_count = i; + + return ret; +} + +/** + * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the + * requested queue + * @cq: pointer to the specific Control queue + * @clean_count: (input|output) number of descriptors to clean as input, and + * number of descriptors actually cleaned as output + * @msg_status: (output) pointer to msg pointer array to be populated; needs + * to be allocated by caller + * + * Returns an array of message pointers associated with the cleaned + * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned + * descriptors. The status will be returned for each; any messages that failed + * to send will have a non-zero status. The caller is expected to free original + * ctlq_msgs and free or reuse the DMA buffers. + */ +int +cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count, + struct idpf_ctlq_msg *msg_status[]) +{ + return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false); +} + +/** + * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring + * @hw: pointer to hw struct + * @cq: pointer to control queue handle + * @buff_count: (input|output) input is number of buffers caller is trying to + * return; output is number of buffers that were not posted + * @buffs: array of pointers to dma mem structs to be given to hardware + * + * Caller uses this function to return DMA buffers to the descriptor ring after + * consuming them; buff_count will be the number of buffers. + * + * Note: this function needs to be called after a receive call even + * if there are no DMA buffers to be returned, i.e. 
buff_count = 0, + * buffs = NULL to support direct commands + */ +int +cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + uint16_t *buff_count, struct idpf_dma_mem **buffs) +{ + struct idpf_ctlq_desc *desc; + uint16_t ntp = cq->next_to_post; + bool buffs_avail = false; + uint16_t tbp = ntp + 1; + int status = 0; + int i = 0; + + if (*buff_count > cq->ring_size) + return -EINVAL; + + if (*buff_count > 0) + buffs_avail = true; + idpf_acquire_lock(&cq->cq_lock); + if (tbp >= cq->ring_size) + tbp = 0; + + if (tbp == cq->next_to_clean) + /* Nothing to do */ + goto post_buffs_out; + + /* Post buffers for as many as provided or up until the last one used */ + while (ntp != cq->next_to_clean) { + desc = IDPF_CTLQ_DESC(cq, ntp); + if (cq->bi.rx_buff[ntp]) + goto fill_desc; + if (!buffs_avail) { + /* If the caller hasn't given us any buffers or + * there are none left, search the ring itself + * for an available buffer to move to this + * entry starting at the next entry in the ring + */ + tbp = ntp + 1; + /* Wrap ring if necessary */ + if (tbp >= cq->ring_size) + tbp = 0; + + while (tbp != cq->next_to_clean) { + if (cq->bi.rx_buff[tbp]) { + cq->bi.rx_buff[ntp] = + cq->bi.rx_buff[tbp]; + cq->bi.rx_buff[tbp] = NULL; + + /* Found a buffer, no need to + * search anymore + */ + break; + } + + /* Wrap ring if necessary */ + tbp++; + if (tbp >= cq->ring_size) + tbp = 0; + } + + if (tbp == cq->next_to_clean) + goto post_buffs_out; + } else { + /* Give back pointer to DMA buffer */ + cq->bi.rx_buff[ntp] = buffs[i]; + i++; + + if (i >= *buff_count) + buffs_avail = false; + } + +fill_desc: + desc->flags = + CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD); + + /* Post buffers to descriptor */ + desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size); + desc->params.indirect.addr_high = + CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa)); + desc->params.indirect.addr_low = + CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa)); + + ntp++; + if (ntp == cq->ring_size) + ntp = 0; + } + +post_buffs_out: + /* Only update tail if buffers were actually posted */ + if (cq->next_to_post != ntp) { + if (ntp) + /* Update next_to_post to ntp - 1 since current ntp + * will not have a buffer + */ + cq->next_to_post = ntp - 1; + else + /* Wrap to end of end ring since current ntp is 0 */ + cq->next_to_post = cq->ring_size - 1; + + wr32(hw, cq->reg.tail, cq->next_to_post); + } + + idpf_release_lock(&cq->cq_lock); + /* return the number of buffers that were not posted */ + *buff_count = *buff_count - i; + + return status; +} + +/** + * cpfl_ctlq_recv - receive control queue message call back + * @cq: pointer to control queue handle to receive on + * @num_q_msg: (input|output) input number of messages that should be received; + * output number of messages actually received + * @q_msg: (output) array of received control queue messages on this q; + * needs to be pre-allocated by caller for as many messages as requested + * + * Called by interrupt handler or polling mechanism. 
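A sketch of the receive path; per the note above, the repost call is made even when no buffers are handed back:

	struct idpf_ctlq_msg q_msg[8];
	uint16_t nb_msg = RTE_DIM(q_msg);
	uint16_t nb_bufs = 0;
	int err;

	err = cpfl_ctlq_recv(cq, &nb_msg, q_msg);
	/* -ENOMSG simply means nothing was pending */
	cpfl_ctlq_post_rx_buffs(hw, cq, &nb_bufs, NULL);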
Caller is expected + * to free buffers + */ +int +cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg, + struct idpf_ctlq_msg *q_msg) +{ + uint16_t num_to_clean, ntc, ret_val, flags; + struct idpf_ctlq_desc *desc; + int ret_code = 0; + uint16_t i = 0; + + if (!cq || !cq->ring_size) + return -ENOBUFS; + + if (*num_q_msg == 0) + return 0; + else if (*num_q_msg > cq->ring_size) + return -EINVAL; + + /* take the lock before we start messing with the ring */ + idpf_acquire_lock(&cq->cq_lock); + ntc = cq->next_to_clean; + num_to_clean = *num_q_msg; + + for (i = 0; i < num_to_clean; i++) { + /* Fetch next descriptor and check if marked as done */ + desc = IDPF_CTLQ_DESC(cq, ntc); + flags = LE16_TO_CPU(desc->flags); + if (!(flags & IDPF_CTLQ_FLAG_DD)) + break; + + ret_val = LE16_TO_CPU(desc->ret_val); + q_msg[i].vmvf_type = (flags & + (IDPF_CTLQ_FLAG_FTYPE_VM | + IDPF_CTLQ_FLAG_FTYPE_PF)) >> + IDPF_CTLQ_FLAG_FTYPE_S; + + if (flags & IDPF_CTLQ_FLAG_ERR) + ret_code = -EBADMSG; + + q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high); + q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low); + q_msg[i].opcode = LE16_TO_CPU(desc->opcode); + q_msg[i].data_len = LE16_TO_CPU(desc->datalen); + q_msg[i].status = ret_val; + + if (desc->datalen) { + idpf_memcpy(q_msg[i].ctx.indirect.context, + &desc->params.indirect, + IDPF_INDIRECT_CTX_SIZE, + IDPF_DMA_TO_NONDMA); + + /* Assign pointer to dma buffer to ctlq_msg array + * to be given to upper layer + */ + q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc]; + + /* Zero out pointer to DMA buffer info; + * will be repopulated by post buffers API + */ + cq->bi.rx_buff[ntc] = NULL; + } else { + idpf_memcpy(q_msg[i].ctx.direct, + desc->params.raw, + IDPF_DIRECT_CTX_SIZE, + IDPF_DMA_TO_NONDMA); + } + + /* Zero out stale data in descriptor */ + idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc), + IDPF_DMA_MEM); + + ntc++; + if (ntc == cq->ring_size) + ntc = 0; + }; + + cq->next_to_clean = ntc; + idpf_release_lock(&cq->cq_lock); + *num_q_msg = i; + if (*num_q_msg == 0) + ret_code = -ENOMSG; + + return ret_code; +} + +int +cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo, + struct idpf_ctlq_info **cq) +{ + return cpfl_ctlq_add(hw, qinfo, cq); +} + +/** + * cpfl_ctlq_shutdown - shutdown the CQ + * The main shutdown routine for any controq queue + */ +static void +cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq) +{ + idpf_acquire_lock(&cq->cq_lock); + + if (!cq->ring_size) + goto shutdown_sq_out; + + /* free ring buffers and the ring itself */ + cpfl_ctlq_dealloc_ring_res(hw, cq); + + /* Set ring_size to 0 to indicate uninitialized queue */ + cq->ring_size = 0; + +shutdown_sq_out: + idpf_release_lock(&cq->cq_lock); + idpf_destroy_lock(&cq->cq_lock); +} + +/** + * cpfl_ctlq_remove - deallocate and remove specified control queue + */ +static void +cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq) +{ + LIST_REMOVE(cq, cq_list); + cpfl_ctlq_shutdown(hw, cq); + idpf_free(hw, cq); +} + +void +cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq) +{ + cpfl_ctlq_remove(hw, cq); +} + +int +cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]) +{ + return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg); +} + +int +cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg, + struct idpf_ctlq_msg q_msg[]) +{ + return cpfl_ctlq_recv(cq, num_q_msg, q_msg); +} + +int +cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw 
*hw, struct idpf_ctlq_info *cq, + uint16_t *buff_count, struct idpf_dma_mem **buffs) +{ + return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs); +} + +int +cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count, + struct idpf_ctlq_msg *msg_status[]) +{ + return cpfl_ctlq_clean_sq(cq, clean_count, msg_status); +} diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h new file mode 100644 index 00000000000..740ae6522cc --- /dev/null +++ b/drivers/net/cpfl/cpfl_controlq.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2023 Intel Corporation + */ + +#ifndef _CPFL_CONTROLQ_H_ +#define _CPFL_CONTROLQ_H_ + +#include "base/idpf_osdep.h" +#include "base/idpf_controlq_api.h" + +#define CPFL_CTLQ_DESCRIPTOR_SIZE 32 +#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE 4096 +#define CPFL_CTLQ_CFGQ_BUFFER_SIZE 256 +#define CPFL_DFLT_MBX_RING_LEN 512 +#define CPFL_CFGQ_RING_LEN 512 + +/* CRQ/CSQ specific error codes */ +#define CPFL_ERR_CTLQ_ERROR -74 /* -EBADMSG */ +#define CPFL_ERR_CTLQ_TIMEOUT -110 /* -ETIMEDOUT */ +#define CPFL_ERR_CTLQ_FULL -28 /* -ENOSPC */ +#define CPFL_ERR_CTLQ_NO_WORK -42 /* -ENOMSG */ +#define CPFL_ERR_CTLQ_EMPTY -105 /* -ENOBUFS */ + +/* Generic queue info structures */ +/* MB, CONFIG and EVENT q do not have extended info */ +struct cpfl_ctlq_create_info { + enum idpf_ctlq_type type; + int id; /* absolute queue offset passed as input + * -1 for default mailbox if present + */ + uint16_t len; /* Queue length passed as input */ + uint16_t buf_size; /* buffer size passed as input */ + uint64_t base_address; /* output, HPA of the Queue start */ + struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */ + /* Pass down previously allocated descriptor ring and buffer memory + * for each control queue to be created + */ + struct idpf_dma_mem ring_mem; + /* The CP will allocate one large buffer that the CPFlib will piece + * into individual buffers for each descriptor + */ + struct idpf_dma_mem buf_mem; + + int ext_info_size; + void *ext_info; /* Specific to q type */ +}; + +int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw, + struct idpf_ctlq_info *cq, + struct cpfl_ctlq_create_info *qinfo); +int cpfl_ctlq_add(struct idpf_hw *hw, + struct cpfl_ctlq_create_info *qinfo, + struct idpf_ctlq_info **cq); +int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + u16 num_q_msg, struct idpf_ctlq_msg q_msg[]); +int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, + struct idpf_ctlq_msg *msg_status[]); +int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + u16 *buff_count, struct idpf_dma_mem **buffs); +int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, + struct idpf_ctlq_msg *q_msg); +int cpfl_vport_ctlq_add(struct idpf_hw *hw, + struct cpfl_ctlq_create_info *qinfo, + struct idpf_ctlq_info **cq); +void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq); +int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + u16 num_q_msg, struct idpf_ctlq_msg q_msg[]); +int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, + struct idpf_ctlq_msg q_msg[]); + +int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + u16 *buff_count, struct idpf_dma_mem **buffs); +int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, + struct idpf_ctlq_msg *msg_status[]); +#endif diff --git a/drivers/net/cpfl/cpfl_cpchnl.h b/drivers/net/cpfl/cpfl_cpchnl.h new file mode 100644 index 00000000000..2eefcbcc108 
--- /dev/null +++ b/drivers/net/cpfl/cpfl_cpchnl.h @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#ifndef _CPFL_CPCHNL_H_ +#define _CPFL_CPCHNL_H_ + +/** @brief Command Opcodes + * Values are to be different from virtchnl.h opcodes + */ +enum cpchnl2_ops { + /* vport info */ + CPCHNL2_OP_GET_VPORT_LIST = 0x8025, + CPCHNL2_OP_GET_VPORT_INFO = 0x8026, + + /* DPHMA Event notifications */ + CPCHNL2_OP_EVENT = 0x8050, +}; + +/* Note! This affects the size of structs below */ +#define CPCHNL2_MAX_TC_AMOUNT 8 + +#define CPCHNL2_ETH_LENGTH_OF_ADDRESS 6 + +#define CPCHNL2_FUNC_TYPE_PF 0 +#define CPCHNL2_FUNC_TYPE_SRIOV 1 + +/* vport statuses - must match the DB ones - see enum cp_vport_status*/ +#define CPCHNL2_VPORT_STATUS_CREATED 0 +#define CPCHNL2_VPORT_STATUS_ENABLED 1 +#define CPCHNL2_VPORT_STATUS_DISABLED 2 +#define CPCHNL2_VPORT_STATUS_DESTROYED 3 + +/* Queue Groups Extension */ +/**************************************************/ + +#define MAX_Q_REGIONS 16 +/* TBD - with current structure sizes, in order not to exceed 4KB ICQH buffer + * no more than 11 queue groups are allowed per a single vport.. + * More will be possible only with future msg fragmentation. + */ +#define MAX_Q_VPORT_GROUPS 11 + +#define CPCHNL2_CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X \ + { static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) } + +struct cpchnl2_queue_chunk { + u32 type; /* 0:QUEUE_TYPE_TX, 1:QUEUE_TYPE_RX */ /* enum nsl_lan_queue_type */ + u32 start_queue_id; + u32 num_queues; + u8 pad[4]; +}; +CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_queue_chunk); + +/* structure to specify several chunks of contiguous queues */ +struct cpchnl2_queue_grp_chunks { + u16 num_chunks; + u8 reserved[6]; + struct cpchnl2_queue_chunk chunks[MAX_Q_REGIONS]; +}; +CPCHNL2_CHECK_STRUCT_LEN(264, cpchnl2_queue_grp_chunks); + +struct cpchnl2_rx_queue_group_info { + /* User can ask to update rss_lut size originally allocated + * by CreateVport command. New size will be returned if allocation succeeded, + * otherwise original rss_size from CreateVport will be returned. + */ + u16 rss_lut_size; + u8 pad[6]; /*Future extension purpose*/ +}; +CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_rx_queue_group_info); + +struct cpchnl2_tx_queue_group_info { + u8 tx_tc; /*TX TC queue group will be connected to*/ + /* Each group can have its own priority, value 0-7, while each group with unique + * priority is strict priority. It can be single set of queue groups which configured with + * same priority, then they are assumed part of WFQ arbitration group and are expected to be + * assigned with weight. + */ + u8 priority; + /* Determines if queue group is expected to be Strict Priority according to its priority */ + u8 is_sp; + u8 pad; + /* Peak Info Rate Weight in case Queue Group is part of WFQ arbitration set. + * The weights of the groups are independent of each other. Possible values: 1-200. + */ + u16 pir_weight; + /* Future extension purpose for CIR only */ + u8 cir_pad[2]; + u8 pad2[8]; /* Future extension purpose*/ +}; +CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_tx_queue_group_info); + +struct cpchnl2_queue_group_id { + /* Queue group ID - depended on it's type: + * Data & p2p - is an index which is relative to Vport. + * Config & Mailbox - is an ID which is relative to func. + * This ID is used in future calls, i.e. delete. + * Requested by host and assigned by Control plane. 
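CPCHNL2_CHECK_STRUCT_LEN() is a compile-time size assertion: if sizeof(struct X) differs from n, the divisor becomes 0 and the enum constant cannot be evaluated, so the build fails. A tiny illustration with a hypothetical struct:

	struct cpchnl2_three_bytes { u8 b[3]; };
	CPCHNL2_CHECK_STRUCT_LEN(3, cpchnl2_three_bytes);	/* builds */
	/* CPCHNL2_CHECK_STRUCT_LEN(4, cpchnl2_three_bytes);	would not build */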
+ */ + u16 queue_group_id; + /* Functional type: see CPCHNL2_QUEUE_GROUP_TYPE definitions */ + u16 queue_group_type; + u8 pad[4]; +}; +CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_queue_group_id); + +struct cpchnl2_queue_group_info { + /* IN */ + struct cpchnl2_queue_group_id qg_id; + + /* IN, Number of queues of different types in the group. */ + u16 num_tx_q; + u16 num_tx_complq; + u16 num_rx_q; + u16 num_rx_bufq; + + struct cpchnl2_tx_queue_group_info tx_q_grp_info; + struct cpchnl2_rx_queue_group_info rx_q_grp_info; + + u8 egress_port; + u8 pad[39]; /*Future extension purpose*/ + struct cpchnl2_queue_grp_chunks chunks; +}; +CPCHNL2_CHECK_STRUCT_LEN(344, cpchnl2_queue_group_info); + +struct cpchnl2_queue_groups { + u16 num_queue_groups; /* Number of queue groups in struct below */ + u8 pad[6]; + /* group information , number is determined by param above */ + struct cpchnl2_queue_group_info groups[MAX_Q_VPORT_GROUPS]; +}; +CPCHNL2_CHECK_STRUCT_LEN(3792, cpchnl2_queue_groups); + +/** + * @brief function types + */ +enum cpchnl2_func_type { + CPCHNL2_FTYPE_LAN_PF = 0, + CPCHNL2_FTYPE_LAN_VF = 1, + CPCHNL2_FTYPE_LAN_MAX +}; + +/** + * @brief containing vport id & type + */ +struct cpchnl2_vport_id { + u32 vport_id; + u16 vport_type; + u8 pad[2]; +}; +CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_vport_id); + +struct cpchnl2_func_id { + /* Function type: 0 - LAN PF, 1 - LAN VF, Rest - "reserved" */ + u8 func_type; + /* Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs + * and 8-12 CPFs are valid + */ + u8 pf_id; + /* Valid only if "type" above is VF, indexing is relative to PF specified above. */ + u16 vf_id; + u8 pad[4]; +}; +CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_func_id); + +/* Note! Do not change the fields and especially their order as should eventually + * be aligned to 32bit. Must match the virtchnl structure definition. + * If should change, change also the relevant FAS and virtchnl code, under permission. + */ +struct cpchnl2_vport_info { + u16 vport_index; + /* VSI index, global indexing aligned to HW. + * Index of HW VSI is allocated by HMA during "CreateVport" virtChnl command. + * Relevant for VSI backed Vports only, not relevant for vport_type = "Qdev". + */ + u16 vsi_id; + u8 vport_status; /* enum cpchnl2_vport_status */ + /* 0 - LAN PF, 1 - LAN VF. Rest - reserved. Can be later expanded to other PEs */ + u8 func_type; + /* Valid only if "type" above is VF, indexing is relative to PF specified above. */ + u16 vf_id; + /* Always relevant, indexing is according to LAN PE 0-15, + * while only 0-4 APFs and 8-12 CPFs are valid. + */ + u8 pf_id; + u8 rss_enabled; /* if RSS is enabled for Vport. Driven by Node Policy. Currently '0' */ + /* MAC Address assigned for this vport, all 0s for "Qdev" Vport type */ + u8 mac_addr[CPCHNL2_ETH_LENGTH_OF_ADDRESS]; + u16 vmrl_id; + /* Indicates if IMC created SEM MAC rule for this Vport. + * Currently this is done by IMC for all Vport of type "Default" only, + * but can be different in the future. + */ + u8 sem_mac_rule_exist; + /* Bitmask to inform which TC is valid. + * 0x1 << TCnum. 1b: valid else 0. + * Driven by Node Policy on system level, then Sysetm level TCs are + * reported to IDPF and it can enable Vport level TCs on TX according + * to Syetm enabled ones. + * If TC aware mode - bit set for valid TC. + * otherwise =1 (only bit 0 is set. represents the VSI + */ + u8 tx_tc_bitmask; + /* For each valid TC, TEID of VPORT node over TC in TX LAN WS. + * If TC aware mode - up to 8 TC TEIDs. 
Otherwise vport_tc_teid[0] shall hold VSI TEID + */ + u32 vport_tc_teid[CPCHNL2_MAX_TC_AMOUNT]; + /* For each valid TC, bandwidth in mbps. + * Default BW per Vport is from Node policy + * If TC aware mode -per TC. Otherwise, bandwidth[0] holds VSI bandwidth + */ + u32 bandwidth[CPCHNL2_MAX_TC_AMOUNT]; + /* From Node Policy. */ + u16 max_mtu; + u16 default_rx_qid; /* Default LAN RX Queue ID */ + u16 vport_flags; /* see: VPORT_FLAGS */ + u8 egress_port; + u8 pad_reserved[5]; +}; +CPCHNL2_CHECK_STRUCT_LEN(96, cpchnl2_vport_info); + +/* + * CPCHNL2_OP_GET_VPORT_LIST + */ + +/** + * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode request + * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved (see enum cpchnl2_func_type) + * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12 + * CPFs are valid + * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above + */ +struct cpchnl2_get_vport_list_request { + u8 func_type; + u8 pf_id; + u16 vf_id; + u8 pad[4]; +}; +CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_request); + +/** + * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode response + * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved. Can be later extended to + * other PE types + * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12 + * CPFs are valid + * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above + * @param nof_vports Number of vports created on the function + * @param vports array of the IDs and types. vport ID is elative to its func (PF/VF). same as in + * Create Vport + * vport_type: Aligned to VirtChnl types: Default, SIOV, etc. + */ +struct cpchnl2_get_vport_list_response { + u8 func_type; + u8 pf_id; + u16 vf_id; + u16 nof_vports; + u8 pad[2]; + struct cpchnl2_vport_id vports[]; +}; +CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_response); + +/* + * CPCHNL2_OP_GET_VPORT_INFO + */ +/** + * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode request + * @param vport a structure containing vport_id (relative to function) and type + * @param func a structure containing function type, pf_id, vf_id + */ +struct cpchnl2_get_vport_info_request { + struct cpchnl2_vport_id vport; + struct cpchnl2_func_id func; +}; +CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_get_vport_info_request); + +/** + * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode response + * @param vport a structure containing vport_id (relative to function) and type to get info for + * @param info a structure all the information for a given vport + * @param queue_groups a structure containing all the queue groups of the given vport + */ +struct cpchnl2_get_vport_info_response { + struct cpchnl2_vport_id vport; + struct cpchnl2_vport_info info; + struct cpchnl2_queue_groups queue_groups; +}; +CPCHNL2_CHECK_STRUCT_LEN(3896, cpchnl2_get_vport_info_response); + + /* Cpchnl events + * Sends event message to inform the peer of notification that may affect it. + * No direct response is expected from the peer, though it may generate other + * messages in response to this one. 
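A sketch of a CPCHNL2_OP_GET_VPORT_LIST request for the local PF; pf_id 0 is an example value and the transport (a mailbox message carrying this struct as its payload) is outside this header:

	struct cpchnl2_get_vport_list_request req = {
		.func_type = CPCHNL2_FUNC_TYPE_PF,
		.pf_id     = 0,
		.vf_id     = 0,	/* ignored for PF requests */
	};
	/* the reply is a cpchnl2_get_vport_list_response followed by
	 * nof_vports cpchnl2_vport_id entries in vports[]
	 */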
+ */ +enum cpchnl2_event { + CPCHNL2_EVENT_UNKNOWN = 0, + CPCHNL2_EVENT_VPORT_CREATED, + CPCHNL2_EVENT_VPORT_DESTROYED, + CPCHNL2_EVENT_VPORT_ENABLED, + CPCHNL2_EVENT_VPORT_DISABLED, + CPCHNL2_PKG_EVENT, + CPCHNL2_EVENT_ADD_QUEUE_GROUPS, + CPCHNL2_EVENT_DEL_QUEUE_GROUPS, + CPCHNL2_EVENT_ADD_QUEUES, + CPCHNL2_EVENT_DEL_QUEUES +}; + +/* + * This is for CPCHNL2_EVENT_VPORT_CREATED + */ +struct cpchnl2_event_vport_created { + struct cpchnl2_vport_id vport; /* Vport identifier to point to specific Vport */ + struct cpchnl2_vport_info info; /* Vport configuration info */ + struct cpchnl2_queue_groups queue_groups; /* Vport assign queue groups configuration info */ +}; +CPCHNL2_CHECK_STRUCT_LEN(3896, cpchnl2_event_vport_created); + +/* + * This is for CPCHNL2_EVENT_VPORT_DESTROYED + */ +struct cpchnl2_event_vport_destroyed { + /* Vport identifier to point to specific Vport */ + struct cpchnl2_vport_id vport; + struct cpchnl2_func_id func; +}; +CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_event_vport_destroyed); + +struct cpchnl2_event_info { + struct { + s32 type; /* See enum cpchnl2_event */ + uint8_t reserved[4]; /* Reserved */ + } header; + union { + struct cpchnl2_event_vport_created vport_created; + struct cpchnl2_event_vport_destroyed vport_destroyed; + } data; +}; + +#endif /* _CPFL_CPCHNL_H_ */ diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index c4ca9343c3e..890a027a1da 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -10,23 +10,41 @@ #include #include #include +#include #include "cpfl_ethdev.h" +#include #include "cpfl_rxtx.h" +#include "cpfl_flow.h" +#include "cpfl_rules.h" +#define CPFL_REPRESENTOR "representor" #define CPFL_TX_SINGLE_Q "tx_single" #define CPFL_RX_SINGLE_Q "rx_single" #define CPFL_VPORT "vport" +#ifdef RTE_HAS_JANSSON +#define CPFL_FLOW_PARSER "flow_parser" +#endif + rte_spinlock_t cpfl_adapter_lock; /* A list for all adapters, one adapter matches one PCI device */ struct cpfl_adapter_list cpfl_adapter_list; bool cpfl_adapter_list_init; -static const char * const cpfl_valid_args[] = { +static const char * const cpfl_valid_args_first[] = { + CPFL_REPRESENTOR, CPFL_TX_SINGLE_Q, CPFL_RX_SINGLE_Q, CPFL_VPORT, +#ifdef RTE_HAS_JANSSON + CPFL_FLOW_PARSER, +#endif + NULL +}; + +static const char * const cpfl_valid_args_again[] = { + CPFL_REPRESENTOR, NULL }; @@ -1058,6 +1076,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev) return 0; } +static void +cpfl_flow_free(struct cpfl_vport *vport) +{ + struct rte_flow *p_flow; + + while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) { + TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next); + if (p_flow->engine->free) + p_flow->engine->free(p_flow); + rte_free(p_flow); + } +} + static int cpfl_p2p_queue_grps_del(struct idpf_vport *vport) { @@ -1089,6 +1120,7 @@ cpfl_dev_close(struct rte_eth_dev *dev) if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) cpfl_p2p_queue_grps_del(vport); + cpfl_flow_free(cpfl_vport); idpf_vport_deinit(vport); rte_free(cpfl_vport->p2p_q_chunks_info); @@ -1096,11 +1128,35 @@ cpfl_dev_close(struct rte_eth_dev *dev) adapter->cur_vport_nb--; dev->data->dev_private = NULL; adapter->vports[vport->sw_idx] = NULL; + idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma); rte_free(cpfl_vport); return 0; } +static int +cpfl_dev_flow_ops_get(struct rte_eth_dev *dev, + const struct rte_flow_ops **ops) +{ + struct cpfl_itf *itf; + + if (!dev) + return -EINVAL; + + itf = CPFL_DEV_TO_ITF(dev); + + /* only vport support rte_flow */ + if (itf->type != 
CPFL_ITF_TYPE_VPORT) + return -ENOTSUP; +#ifdef RTE_HAS_JANSSON + *ops = &cpfl_flow_ops; +#else + *ops = NULL; + PMD_DRV_LOG(NOTICE, "not support rte_flow, please install json-c library."); +#endif + return 0; +} + static int cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports, size_t len, uint32_t tx) @@ -1302,6 +1358,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = { .xstats_get = cpfl_dev_xstats_get, .xstats_get_names = cpfl_dev_xstats_get_names, .xstats_reset = cpfl_dev_xstats_reset, + .flow_ops_get = cpfl_dev_flow_ops_get, .hairpin_cap_get = cpfl_hairpin_cap_get, .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup, .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup, @@ -1407,19 +1464,161 @@ parse_bool(const char *key, const char *value, void *args) } static int -cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, - struct cpfl_devargs *cpfl_args) +enlist(uint16_t *list, uint16_t *len_list, const uint16_t max_list, uint16_t val) +{ + uint16_t i; + + for (i = 0; i < *len_list; i++) { + if (list[i] == val) + return 0; + } + if (*len_list >= max_list) + return -1; + list[(*len_list)++] = val; + return 0; +} + +static const char * +process_range(const char *str, uint16_t *list, uint16_t *len_list, + const uint16_t max_list) +{ + uint16_t lo, hi, val; + int result, n = 0; + const char *pos = str; + + result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n); + if (result == 1) { + if (enlist(list, len_list, max_list, lo) != 0) + return NULL; + } else if (result == 2) { + if (lo > hi) + return NULL; + for (val = lo; val <= hi; val++) { + if (enlist(list, len_list, max_list, val) != 0) + return NULL; + } + } else { + return NULL; + } + return pos + n; +} + +static const char * +process_list(const char *str, uint16_t *list, uint16_t *len_list, const uint16_t max_list) +{ + const char *pos = str; + + if (*pos == '[') + pos++; + while (1) { + pos = process_range(pos, list, len_list, max_list); + if (pos == NULL) + return NULL; + if (*pos != ',') /* end of list */ + break; + pos++; + } + if (*str == '[' && *pos != ']') + return NULL; + if (*pos == ']') + pos++; + return pos; +} + +static int +parse_repr(const char *key __rte_unused, const char *value, void *args) +{ + struct cpfl_devargs *devargs = args; + struct rte_eth_devargs *eth_da; + const char *str = value; + + if (devargs->repr_args_num == CPFL_REPR_ARG_NUM_MAX) + return -EINVAL; + + eth_da = &devargs->repr_args[devargs->repr_args_num]; + + if (str[0] == 'c') { + str += 1; + str = process_list(str, eth_da->mh_controllers, + ð_da->nb_mh_controllers, + RTE_DIM(eth_da->mh_controllers)); + if (str == NULL) + goto done; + } + if (str[0] == 'p' && str[1] == 'f') { + eth_da->type = RTE_ETH_REPRESENTOR_PF; + str += 2; + str = process_list(str, eth_da->ports, + ð_da->nb_ports, RTE_DIM(eth_da->ports)); + if (str == NULL || str[0] == '\0') + goto done; + } else if (eth_da->nb_mh_controllers > 0) { + /* 'c' must followed by 'pf'. */ + str = NULL; + goto done; + } + if (str[0] == 'v' && str[1] == 'f') { + eth_da->type = RTE_ETH_REPRESENTOR_VF; + str += 2; + } else if (str[0] == 's' && str[1] == 'f') { + eth_da->type = RTE_ETH_REPRESENTOR_SF; + str += 2; + } else { + /* 'pf' must followed by 'vf' or 'sf'. 
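 *
 * For reference (illustrative values only), the list parser used above turns
 * an accepted fragment such as "[0,2-3]" into an array:
 *	uint16_t ports[8]; uint16_t nb = 0;
 *	const char *rest = process_list("[0,2-3]", ports, &nb, RTE_DIM(ports));
 * leaving ports = {0, 2, 3}, nb = 3 and rest just past the closing ']'.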
*/ + if (eth_da->type == RTE_ETH_REPRESENTOR_PF) { + str = NULL; + goto done; + } + eth_da->type = RTE_ETH_REPRESENTOR_VF; + } + str = process_list(str, eth_da->representor_ports, + ð_da->nb_representor_ports, + RTE_DIM(eth_da->representor_ports)); +done: + if (str == NULL) { + RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str); + return -1; + } + + devargs->repr_args_num++; + + return 0; +} + +#ifdef RTE_HAS_JANSSON +static int +parse_file(const char *key, const char *value, void *args) +{ + char *name = args; + + if (strlen(value) > CPFL_FLOW_FILE_LEN - 1) { + PMD_DRV_LOG(ERR, "file path(%s) is too long.", value); + return -1; + } + + PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key); + strlcpy(name, value, CPFL_FLOW_FILE_LEN); + + return 0; +} +#endif + +static int +cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first) { struct rte_devargs *devargs = pci_dev->device.devargs; + struct cpfl_devargs *cpfl_args = &adapter->devargs; struct rte_kvargs *kvlist; - int i, ret; - - cpfl_args->req_vport_nb = 0; + int ret; if (devargs == NULL) return 0; - kvlist = rte_kvargs_parse(devargs->args, cpfl_valid_args); + if (first) + memset(cpfl_args, 0, sizeof(struct cpfl_devargs)); + + kvlist = rte_kvargs_parse(devargs->args, + first ? cpfl_valid_args_first : cpfl_valid_args_again); if (kvlist == NULL) { PMD_INIT_LOG(ERR, "invalid kvargs key"); return -EINVAL; @@ -1430,6 +1629,14 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap return -EINVAL; } + ret = rte_kvargs_process(kvlist, CPFL_REPRESENTOR, &parse_repr, cpfl_args); + + if (ret != 0) + goto fail; + + if (!first) + return 0; + ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport, cpfl_args); if (ret != 0) @@ -1444,71 +1651,73 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap &adapter->base.is_rx_singleq); if (ret != 0) goto fail; - - /* check parsed devargs */ - if (adapter->cur_vport_nb + cpfl_args->req_vport_nb > - adapter->max_vport_nb) { - PMD_INIT_LOG(ERR, "Total vport number can't be > %d", - adapter->max_vport_nb); - ret = -EINVAL; - goto fail; - } - - for (i = 0; i < cpfl_args->req_vport_nb; i++) { - if (cpfl_args->req_vports[i] > adapter->max_vport_nb - 1) { - PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d", - cpfl_args->req_vports[i], adapter->max_vport_nb - 1); - ret = -EINVAL; - goto fail; - } - - if (adapter->cur_vports & RTE_BIT32(cpfl_args->req_vports[i])) { - PMD_INIT_LOG(ERR, "Vport %d has been requested", - cpfl_args->req_vports[i]); - ret = -EINVAL; +#ifdef RTE_HAS_JANSSON + if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) { + ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER, + &parse_file, cpfl_args->flow_parser); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to parser flow_parser, ret: %d", ret); goto fail; } + } else { + cpfl_args->flow_parser[0] = '\0'; } - +#endif fail: rte_kvargs_free(kvlist); return ret; } -static struct idpf_vport * +static struct cpfl_vport * cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id) { - struct idpf_vport *vport = NULL; + struct cpfl_vport *vport = NULL; int i; for (i = 0; i < adapter->cur_vport_nb; i++) { - vport = &adapter->vports[i]->base; - if (vport->vport_id != vport_id) + vport = adapter->vports[i]; + if (vport == NULL) + continue; + if (vport->base.vport_id != vport_id) continue; else return vport; } - return vport; + return NULL; } static void -cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen) 
+cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen) { struct virtchnl2_event *vc_event = (struct virtchnl2_event *)msg; - struct rte_eth_dev_data *data = vport->dev_data; - struct rte_eth_dev *dev = &rte_eth_devices[data->port_id]; + struct cpfl_vport *vport; + struct rte_eth_dev_data *data; + struct rte_eth_dev *dev; if (msglen < sizeof(struct virtchnl2_event)) { PMD_DRV_LOG(ERR, "Error event"); return; } + /* ignore if it is ctrl vport */ + if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id) + return; + + vport = cpfl_find_vport(adapter, vc_event->vport_id); + if (!vport) { + PMD_DRV_LOG(ERR, "Can't find vport."); + return; + } + + data = vport->itf.data; + dev = &rte_eth_devices[data->port_id]; + switch (vc_event->event) { case VIRTCHNL2_EVENT_LINK_CHANGE: PMD_DRV_LOG(DEBUG, "VIRTCHNL2_EVENT_LINK_CHANGE"); - vport->link_up = !!(vc_event->link_status); - vport->link_speed = vc_event->link_speed; + vport->base.link_up = !!(vc_event->link_status); + vport->base.link_speed = vc_event->link_speed; cpfl_dev_link_update(dev, 0); break; default: @@ -1517,16 +1726,116 @@ cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen) } } +int +cpfl_vport_info_create(struct cpfl_adapter_ext *adapter, + struct cpfl_vport_id *vport_identity, + struct cpchnl2_event_vport_created *vport_created) +{ + struct cpfl_vport_info *info = NULL; + int ret; + + rte_spinlock_lock(&adapter->vport_map_lock); + ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info); + if (ret >= 0) { + PMD_DRV_LOG(WARNING, "vport already exist, overwrite info anyway"); + /* overwrite info */ + if (info) + info->vport = *vport_created; + goto fini; + } + + info = rte_zmalloc(NULL, sizeof(*info), 0); + if (info == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory for vport map info"); + ret = -ENOMEM; + goto err; + } + + info->vport = *vport_created; + + ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, info); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to add vport map into hash"); + rte_free(info); + goto err; + } + +fini: + rte_spinlock_unlock(&adapter->vport_map_lock); + return 0; +err: + rte_spinlock_unlock(&adapter->vport_map_lock); + return ret; +} + +static int +cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id *vport_identity) +{ + struct cpfl_vport_info *info; + int ret; + + rte_spinlock_lock(&adapter->vport_map_lock); + ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info); + if (ret < 0) { + PMD_DRV_LOG(ERR, "vport id doesn't exist"); + goto err; + } + + rte_hash_del_key(adapter->vport_map_hash, vport_identity); + rte_spinlock_unlock(&adapter->vport_map_lock); + rte_free(info); + + return 0; + +err: + rte_spinlock_unlock(&adapter->vport_map_lock); + return ret; +} + +static void +cpfl_handle_cpchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen) +{ + struct cpchnl2_event_info *cpchnl2_event = (struct cpchnl2_event_info *)msg; + struct cpchnl2_event_vport_created *vport_created; + struct cpfl_vport_id vport_identity = { 0 }; + + if (msglen < sizeof(struct cpchnl2_event_info)) { + PMD_DRV_LOG(ERR, "Error event"); + return; + } + + switch (cpchnl2_event->header.type) { + case CPCHNL2_EVENT_VPORT_CREATED: + vport_identity.vport_id = cpchnl2_event->data.vport_created.vport.vport_id; + vport_created = &cpchnl2_event->data.vport_created; + vport_identity.func_type = vport_created->info.func_type; + vport_identity.pf_id = 
vport_created->info.pf_id; + vport_identity.vf_id = vport_created->info.vf_id; + if (cpfl_vport_info_create(adapter, &vport_identity, vport_created)) + PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_CREATED"); + break; + case CPCHNL2_EVENT_VPORT_DESTROYED: + vport_identity.vport_id = cpchnl2_event->data.vport_destroyed.vport.vport_id; + vport_identity.func_type = cpchnl2_event->data.vport_destroyed.func.func_type; + vport_identity.pf_id = cpchnl2_event->data.vport_destroyed.func.pf_id; + vport_identity.vf_id = cpchnl2_event->data.vport_destroyed.func.vf_id; + if (cpfl_vport_info_destroy(adapter, &vport_identity)) + PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_DESTROY"); + break; + default: + PMD_DRV_LOG(ERR, " unknown event received %u", cpchnl2_event->header.type); + break; + } +} + static void cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter) { struct idpf_adapter *base = &adapter->base; struct idpf_dma_mem *dma_mem = NULL; struct idpf_hw *hw = &base->hw; - struct virtchnl2_event *vc_event; struct idpf_ctlq_msg ctlq_msg; enum idpf_mbx_opc mbx_op; - struct idpf_vport *vport; uint16_t pending = 1; uint32_t vc_op; int ret; @@ -1548,18 +1857,11 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter) switch (mbx_op) { case idpf_mbq_opc_send_msg_to_peer_pf: if (vc_op == VIRTCHNL2_OP_EVENT) { - if (ctlq_msg.data_len < sizeof(struct virtchnl2_event)) { - PMD_DRV_LOG(ERR, "Error event"); - return; - } - vc_event = (struct virtchnl2_event *)base->mbx_resp; - vport = cpfl_find_vport(adapter, vc_event->vport_id); - if (!vport) { - PMD_DRV_LOG(ERR, "Can't find vport."); - return; - } - cpfl_handle_event_msg(vport, base->mbx_resp, - ctlq_msg.data_len); + cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp, + ctlq_msg.data_len); + } else if (vc_op == CPCHNL2_OP_EVENT) { + cpfl_handle_cpchnl_event_msg(adapter, adapter->base.mbx_resp, + ctlq_msg.data_len); } else { if (vc_op == base->pend_cmd) notify_cmd(base, base->cmd_retval); @@ -1597,6 +1899,262 @@ cpfl_dev_alarm_handler(void *param) rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter); } +static int +cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter) +{ + int i, ret; + + for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) { + ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false); + if (ret) { + PMD_DRV_LOG(ERR, "Fail to disable Tx config queue."); + return ret; + } + } + + for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) { + ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false); + if (ret) { + PMD_DRV_LOG(ERR, "Fail to disable Rx config queue."); + return ret; + } + } + + return 0; +} + +static int +cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter) +{ + int i, ret; + + ret = cpfl_config_ctlq_tx(adapter); + if (ret) { + PMD_DRV_LOG(ERR, "Fail to configure Tx config queue."); + return ret; + } + + ret = cpfl_config_ctlq_rx(adapter); + if (ret) { + PMD_DRV_LOG(ERR, "Fail to configure Rx config queue."); + return ret; + } + + for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) { + ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true); + if (ret) { + PMD_DRV_LOG(ERR, "Fail to enable Tx config queue."); + return ret; + } + } + + for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) { + ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true); + if (ret) { + PMD_DRV_LOG(ERR, "Fail to enable Rx config queue."); + return ret; + } + } + + return 0; +} + +static void +cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter) +{ + struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw); 
+ struct cpfl_ctlq_create_info *create_cfgq_info; + int i; + + create_cfgq_info = adapter->cfgq_info; + + for (i = 0; i < CPFL_CFGQ_NUM; i++) { + if (adapter->ctlqp[i]) + cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]); + if (create_cfgq_info[i].ring_mem.va) + idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem); + if (create_cfgq_info[i].buf_mem.va) + idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem); + } +} + +static int +cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter) +{ + struct idpf_ctlq_info *cfg_cq; + int ret = 0; + int i = 0; + + for (i = 0; i < CPFL_CFGQ_NUM; i++) { + cfg_cq = NULL; + ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw), + &adapter->cfgq_info[i], + &cfg_cq); + if (ret || !cfg_cq) { + PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d", + adapter->cfgq_info[i].id); + cpfl_remove_cfgqs(adapter); + return ret; + } + PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d", + adapter->cfgq_info[i].id); + adapter->ctlqp[i] = cfg_cq; + } + + return ret; +} + +#define CPFL_CFGQ_RING_LEN 512 +#define CPFL_CFGQ_DESCRIPTOR_SIZE 32 +#define CPFL_CFGQ_BUFFER_SIZE 256 +#define CPFL_CFGQ_RING_SIZE 512 + +static int +cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_ctlq_create_info *create_cfgq_info; + struct cpfl_vport *vport; + int i, err; + uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc); + uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE; + + vport = &adapter->ctrl_vport; + create_cfgq_info = adapter->cfgq_info; + + for (i = 0; i < CPFL_CFGQ_NUM; i++) { + if (i % 2 == 0) { + /* Setup Tx config queue */ + create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2; + create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX; + create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE; + create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE; + memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg)); + create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start + + i / 2 * vport->base.chunks_info.tx_qtail_spacing; + } else { + /* Setup Rx config queue */ + create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2; + create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX; + create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE; + create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE; + memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg)); + create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start + + i / 2 * vport->base.chunks_info.rx_qtail_spacing; + if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem, + buf_size)) { + err = -ENOMEM; + goto free_mem; + } + } + if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem, + ring_size)) { + err = -ENOMEM; + goto free_mem; + } + } + return 0; +free_mem: + for (i = 0; i < CPFL_CFGQ_NUM; i++) { + if (create_cfgq_info[i].ring_mem.va) + idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem); + if (create_cfgq_info[i].buf_mem.va) + idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem); + } + return err; +} + +static int +cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_vport *vport = &adapter->ctrl_vport; + struct virtchnl2_create_vport *vport_info = + (struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info; + int i; + + vport->itf.adapter = adapter; + vport->base.adapter = &adapter->base; + vport->base.vport_id = vport_info->vport_id; + + for (i = 0; i < vport_info->chunks.num_chunks; i++) { + if 
(vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) { + vport->base.chunks_info.tx_start_qid = + vport_info->chunks.chunks[i].start_queue_id; + vport->base.chunks_info.tx_qtail_start = + vport_info->chunks.chunks[i].qtail_reg_start; + vport->base.chunks_info.tx_qtail_spacing = + vport_info->chunks.chunks[i].qtail_reg_spacing; + } else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) { + vport->base.chunks_info.rx_start_qid = + vport_info->chunks.chunks[i].start_queue_id; + vport->base.chunks_info.rx_qtail_start = + vport_info->chunks.chunks[i].qtail_reg_start; + vport->base.chunks_info.rx_qtail_spacing = + vport_info->chunks.chunks[i].qtail_reg_spacing; + } else { + PMD_INIT_LOG(ERR, "Unsupported chunk type"); + return -EINVAL; + } + } + + return 0; +} + +static void +cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter) +{ + cpfl_stop_cfgqs(adapter); + cpfl_remove_cfgqs(adapter); + idpf_vc_vport_destroy(&adapter->ctrl_vport.base); +} + +static int +cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter) +{ + int ret; + + ret = cpfl_vc_create_ctrl_vport(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to create control vport"); + return ret; + } + + ret = cpfl_init_ctrl_vport(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init control vport"); + goto err_init_ctrl_vport; + } + + ret = cpfl_cfgq_setup(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to setup control queues"); + goto err_cfgq_setup; + } + + ret = cpfl_add_cfgqs(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to add control queues"); + goto err_add_cfgq; + } + + ret = cpfl_start_cfgqs(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to start control queues"); + goto err_start_cfgqs; + } + + return 0; + +err_start_cfgqs: + cpfl_stop_cfgqs(adapter); +err_add_cfgq: + cpfl_remove_cfgqs(adapter); +err_cfgq_setup: +err_init_ctrl_vport: + idpf_vc_vport_destroy(&adapter->ctrl_vport.base); + + return ret; +} + static struct virtchnl2_get_capabilities req_caps = { .csum_caps = VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 | @@ -1635,6 +2193,84 @@ static struct virtchnl2_get_capabilities req_caps = { .other_caps = VIRTCHNL2_CAP_WB_ON_ITR }; +static int +cpfl_vport_map_init(struct cpfl_adapter_ext *adapter) +{ + char hname[32]; + + snprintf(hname, 32, "%s-vport", adapter->name); + + rte_spinlock_init(&adapter->vport_map_lock); + +#define CPFL_VPORT_MAP_HASH_ENTRY_NUM 2048 + + struct rte_hash_parameters params = { + .name = adapter->name, + .entries = CPFL_VPORT_MAP_HASH_ENTRY_NUM, + .key_len = sizeof(struct cpfl_vport_id), + .hash_func = rte_hash_crc, + .socket_id = SOCKET_ID_ANY, + }; + + adapter->vport_map_hash = rte_hash_create(&params); + + if (adapter->vport_map_hash == NULL) { + PMD_INIT_LOG(ERR, "Failed to create vport map hash"); + return -EINVAL; + } + + return 0; +} + +static void +cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter) +{ + const void *key = NULL; + struct cpfl_vport_info *info; + uint32_t iter = 0; + + while (rte_hash_iterate(adapter->vport_map_hash, &key, (void **)&info, &iter) >= 0) + rte_free(info); + + rte_hash_free(adapter->vport_map_hash); } + +static int +cpfl_repr_allowlist_init(struct cpfl_adapter_ext *adapter) +{ + char hname[32]; + + snprintf(hname, 32, "%s-repr_al", adapter->name); + + rte_spinlock_init(&adapter->repr_lock); + +#define CPFL_REPR_HASH_ENTRY_NUM 2048 + + struct rte_hash_parameters params = { + .name = hname, + .entries = CPFL_REPR_HASH_ENTRY_NUM, + .key_len = sizeof(struct cpfl_repr_id), + .hash_func = rte_hash_crc, + .socket_id = SOCKET_ID_ANY, + }; + + 
adapter->repr_allowlist_hash = rte_hash_create(&params); + + if (adapter->repr_allowlist_hash == NULL) { + PMD_INIT_LOG(ERR, "Failed to create repr allowlist hash"); + return -EINVAL; + } + + return 0; +} + +static void +cpfl_repr_allowlist_uninit(struct cpfl_adapter_ext *adapter) +{ + rte_hash_free(adapter->repr_allowlist_hash); +} + + static int cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) { @@ -1659,6 +2295,18 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a goto err_adapter_init; } + ret = cpfl_vport_map_init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init vport map"); + goto err_vport_map_init; + } + + ret = cpfl_repr_allowlist_init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init representor allowlist"); + goto err_repr_allowlist_init; + } + rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter); adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ? @@ -1674,6 +2322,19 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a goto err_vports_alloc; } + ret = cpfl_ctrl_path_open(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to setup control path"); + goto err_create_ctrl_vport; + } + +#ifdef RTE_HAS_JANSSON + ret = cpfl_flow_init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init flow module"); + goto err_flow_init; + } +#endif adapter->cur_vports = 0; adapter->cur_vport_nb = 0; @@ -1681,8 +2342,18 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a return ret; +#ifdef RTE_HAS_JANSSON +err_flow_init: + cpfl_ctrl_path_close(adapter); +#endif +err_create_ctrl_vport: + rte_free(adapter->vports); err_vports_alloc: rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter); + cpfl_repr_allowlist_uninit(adapter); +err_repr_allowlist_init: + cpfl_vport_map_uninit(adapter); +err_vport_map_init: idpf_adapter_deinit(base); err_adapter_init: return ret; @@ -1797,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport, return 0; } +int +cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size, + int batch_size) +{ + int i; + + if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) { + PMD_INIT_LOG(ERR, "Could not alloc dma memory"); + return -ENOMEM; + } + + for (i = 0; i < batch_size; i++) { + dma[i].va = (void *)((char *)orig_dma->va + size * (i + 1)); + dma[i].pa = orig_dma->pa + size * (i + 1); + dma[i].size = size; + dma[i].zone = NULL; + } + return 0; +} + static int cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) { @@ -1828,6 +2519,10 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) goto err; } + cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT; + cpfl_vport->itf.adapter = adapter; + cpfl_vport->itf.data = dev->data; + TAILQ_INIT(&cpfl_vport->itf.flow_list); adapter->vports[param->idx] = cpfl_vport; adapter->cur_vports |= RTE_BIT32(param->devarg_id); adapter->cur_vport_nb++; @@ -1842,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr, &dev->data->mac_addrs[0]); + memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma)); + memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg)); + ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma, + cpfl_vport->itf.dma, + sizeof(union cpfl_rule_cfg_pkt_record), + CPFL_FLOW_BATCH_SIZE); + if (ret < 0) + goto err_mac_addrs; + if (!adapter->base.is_rx_singleq && 
!adapter->base.is_tx_singleq) { memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info)); ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info); @@ -1908,7 +2612,12 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev) static void cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter) { +#ifdef RTE_HAS_JANSSON + cpfl_flow_uninit(adapter); +#endif + cpfl_ctrl_path_close(adapter); rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter); + cpfl_vport_map_uninit(adapter); idpf_adapter_deinit(&adapter->base); rte_free(adapter->vports); @@ -1916,21 +2625,79 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter) } static int -cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, - struct rte_pci_device *pci_dev) +cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_devargs *devargs = &adapter->devargs; + int i; + + /* refine vport number, at least 1 vport */ + if (devargs->req_vport_nb == 0) { + devargs->req_vport_nb = 1; + devargs->req_vports[0] = 0; + } + + /* check parsed devargs */ + if (adapter->cur_vport_nb + devargs->req_vport_nb > + adapter->max_vport_nb) { + PMD_INIT_LOG(ERR, "Total vport number can't be > %d", + adapter->max_vport_nb); + return -EINVAL; + } + + for (i = 0; i < devargs->req_vport_nb; i++) { + if (devargs->req_vports[i] > adapter->max_vport_nb - 1) { + PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d", + devargs->req_vports[i], adapter->max_vport_nb - 1); + return -EINVAL; + } + + if (adapter->cur_vports & RTE_BIT32(devargs->req_vports[i])) { + PMD_INIT_LOG(ERR, "Vport %d has been requested", + devargs->req_vports[i]); + return -EINVAL; + } + } + + return 0; +} + +static int +cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) { struct cpfl_vport_param vport_param; - struct cpfl_adapter_ext *adapter; - struct cpfl_devargs devargs; char name[RTE_ETH_NAME_MAX_LEN]; - int i, retval; + int ret, i; - if (!cpfl_adapter_list_init) { - rte_spinlock_init(&cpfl_adapter_lock); - TAILQ_INIT(&cpfl_adapter_list); - cpfl_adapter_list_init = true; + for (i = 0; i < adapter->devargs.req_vport_nb; i++) { + vport_param.adapter = adapter; + vport_param.devarg_id = adapter->devargs.req_vports[i]; + vport_param.idx = cpfl_vport_idx_alloc(adapter); + if (vport_param.idx == CPFL_INVALID_VPORT_IDX) { + PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id); + break; + } + snprintf(name, sizeof(name), "net_%s_vport_%d", + pci_dev->device.name, + adapter->devargs.req_vports[i]); + ret = rte_eth_dev_create(&pci_dev->device, name, + sizeof(struct cpfl_vport), + NULL, NULL, cpfl_dev_vport_init, + &vport_param); + if (ret != 0) + PMD_DRV_LOG(ERR, "Failed to create vport %d", + vport_param.devarg_id); } + return 0; +} + +static int +cpfl_pci_probe_first(struct rte_pci_device *pci_dev) +{ + struct cpfl_adapter_ext *adapter; + int retval; + uint16_t port_id; + adapter = rte_zmalloc("cpfl_adapter_ext", sizeof(struct cpfl_adapter_ext), 0); if (adapter == NULL) { @@ -1938,6 +2705,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, return -ENOMEM; } + retval = cpfl_parse_devargs(pci_dev, adapter, true); + if (retval != 0) { + PMD_INIT_LOG(ERR, "Failed to parse private devargs"); + return retval; + } + retval = cpfl_adapter_ext_init(pci_dev, adapter); if (retval != 0) { PMD_INIT_LOG(ERR, "Failed to init adapter."); @@ -1948,53 +2721,38 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, TAILQ_INSERT_TAIL(&cpfl_adapter_list, adapter, next); 
rte_spinlock_unlock(&cpfl_adapter_lock); - retval = cpfl_parse_devargs(pci_dev, adapter, &devargs); + retval = cpfl_vport_devargs_process(adapter); if (retval != 0) { - PMD_INIT_LOG(ERR, "Failed to parse private devargs"); + PMD_INIT_LOG(ERR, "Failed to process vport devargs"); goto err; } - if (devargs.req_vport_nb == 0) { - /* If no vport devarg, create vport 0 by default. */ - vport_param.adapter = adapter; - vport_param.devarg_id = 0; - vport_param.idx = cpfl_vport_idx_alloc(adapter); - if (vport_param.idx == CPFL_INVALID_VPORT_IDX) { - PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id); - return 0; - } - snprintf(name, sizeof(name), "cpfl_%s_vport_0", - pci_dev->device.name); - retval = rte_eth_dev_create(&pci_dev->device, name, - sizeof(struct cpfl_vport), - NULL, NULL, cpfl_dev_vport_init, - &vport_param); - if (retval != 0) - PMD_DRV_LOG(ERR, "Failed to create default vport 0"); - } else { - for (i = 0; i < devargs.req_vport_nb; i++) { - vport_param.adapter = adapter; - vport_param.devarg_id = devargs.req_vports[i]; - vport_param.idx = cpfl_vport_idx_alloc(adapter); - if (vport_param.idx == CPFL_INVALID_VPORT_IDX) { - PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id); - break; - } - snprintf(name, sizeof(name), "cpfl_%s_vport_%d", - pci_dev->device.name, - devargs.req_vports[i]); - retval = rte_eth_dev_create(&pci_dev->device, name, - sizeof(struct cpfl_vport), - NULL, NULL, cpfl_dev_vport_init, - &vport_param); - if (retval != 0) - PMD_DRV_LOG(ERR, "Failed to create vport %d", - vport_param.devarg_id); - } + retval = cpfl_vport_create(pci_dev, adapter); + if (retval != 0) { + PMD_INIT_LOG(ERR, "Failed to create vports."); + goto err; } + retval = cpfl_repr_devargs_process(adapter); + if (retval != 0) { + PMD_INIT_LOG(ERR, "Failed to process repr devargs"); + goto close_ethdev; + } + + retval = cpfl_repr_create(pci_dev, adapter); + if (retval != 0) { + PMD_INIT_LOG(ERR, "Failed to create representors"); + goto close_ethdev; + } + + return 0; +close_ethdev: + /* Ethdevs created can be found via RTE_ETH_FOREACH_DEV_OF through rte_device */ + RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) { + rte_eth_dev_close(port_id); + } err: rte_spinlock_lock(&cpfl_adapter_lock); TAILQ_REMOVE(&cpfl_adapter_list, adapter, next); @@ -2004,6 +2762,52 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, return retval; } +static int +cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) +{ + int ret; + + ret = cpfl_parse_devargs(pci_dev, adapter, false); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to parse private devargs"); + return ret; + } + + ret = cpfl_repr_devargs_process(adapter); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to process representor devargs"); + return ret; + } + + ret = cpfl_repr_create(pci_dev, adapter); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to create representors"); + return ret; + } + + return 0; +} + +static int +cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + struct cpfl_adapter_ext *adapter; + + if (!cpfl_adapter_list_init) { + rte_spinlock_init(&cpfl_adapter_lock); + TAILQ_INIT(&cpfl_adapter_list); + cpfl_adapter_list_init = true; + } + + adapter = cpfl_find_adapter_ext(pci_dev); + + if (adapter == NULL) + return cpfl_pci_probe_first(pci_dev); + else + return cpfl_pci_probe_again(pci_dev, adapter); +} + static int cpfl_pci_remove(struct rte_pci_device *pci_dev) { @@ -2026,7 +2830,8 @@ cpfl_pci_remove(struct rte_pci_device *pci_dev) static 
struct rte_pci_driver rte_cpfl_pmd = { .id_table = pci_id_cpfl_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | + RTE_PCI_DRV_PROBE_AGAIN, .probe = cpfl_pci_probe, .remove = cpfl_pci_remove, }; diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index 2e42354f708..efb0eb5251e 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -10,16 +10,20 @@ #include #include #include +#include #include #include -#include "cpfl_logs.h" - #include #include #include #include +#include "cpfl_logs.h" +#include "cpfl_cpchnl.h" +#include "cpfl_representor.h" +#include "cpfl_controlq.h" + /* Currently, backend supports up to 8 vports */ #define CPFL_MAX_VPORT_NUM 8 @@ -58,16 +62,56 @@ #define IDPF_DEV_ID_CPF 0x1453 #define VIRTCHNL2_QUEUE_GROUP_P2P 0x100 +#define CPFL_HOST_ID_NUM 2 +#define CPFL_PF_TYPE_NUM 2 +#define CPFL_HOST_ID_HOST 0 +#define CPFL_HOST_ID_ACC 1 +#define CPFL_PF_TYPE_APF 0 +#define CPFL_PF_TYPE_CPF 1 + +/* Function IDs on IMC side */ +#define CPFL_HOST0_APF 0 +#define CPFL_ACC_APF_ID 4 +#define CPFL_HOST0_CPF_ID 8 +#define CPFL_ACC_CPF_ID 12 + +#define CPFL_VPORT_LAN_PF 0 +#define CPFL_VPORT_LAN_VF 1 + +#define CPFL_FLOW_FILE_LEN 100 +#define CPFL_INVALID_HW_ID UINT16_MAX +#define CPFL_META_CHUNK_LENGTH 1024 +#define CPFL_META_LENGTH 32 + +#define CPFL_RX_CFGQ_NUM 4 +#define CPFL_TX_CFGQ_NUM 4 +#define CPFL_FPCP_CFGQ_TX 0 +#define CPFL_FPCP_CFGQ_RX 1 +#define CPFL_CFGQ_NUM 8 + +/* bit[15:14] type + * bit[13] host/accelerator core + * bit[12] apf/cpf + * bit[11:0] vf + */ +#define CPFL_REPRESENTOR_ID(type, host_id, pf_id, vf_id) \ + ((((type) & 0x3) << 14) + (((host_id) & 0x1) << 13) + \ + (((pf_id) & 0x1) << 12) + ((vf_id) & 0xfff)) + struct cpfl_vport_param { struct cpfl_adapter_ext *adapter; uint16_t devarg_id; /* arg id from user */ uint16_t idx; /* index in adapter->vports[]*/ }; +#define CPFL_REPR_ARG_NUM_MAX 4 /* Struct used when parse driver specific devargs */ struct cpfl_devargs { uint16_t req_vports[CPFL_MAX_VPORT_NUM]; uint16_t req_vport_nb; + uint8_t repr_args_num; + struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX]; + char flow_parser[CPFL_FLOW_FILE_LEN]; }; struct p2p_queue_chunks_info { @@ -86,7 +130,38 @@ struct p2p_queue_chunks_info { uint32_t rx_buf_qtail_spacing; }; +struct cpfl_vport_id { + uint32_t vport_id; + uint8_t func_type; + uint8_t pf_id; + uint16_t vf_id; +}; + +struct cpfl_vport_info { + struct cpchnl2_event_vport_created vport; + bool enabled; +}; + +enum cpfl_itf_type { + CPFL_ITF_TYPE_VPORT, + CPFL_ITF_TYPE_REPRESENTOR, +}; + +TAILQ_HEAD(cpfl_flow_list, rte_flow); + +#define CPFL_FLOW_BATCH_SIZE 490 +struct cpfl_itf { + enum cpfl_itf_type type; + struct cpfl_adapter_ext *adapter; + struct cpfl_flow_list flow_list; + struct idpf_dma_mem flow_dma; + struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE]; + struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE]; + void *data; +}; + struct cpfl_vport { + struct cpfl_itf itf; struct idpf_vport base; struct p2p_queue_chunks_info *p2p_q_chunks_info; @@ -102,6 +177,28 @@ struct cpfl_vport { bool p2p_manual_bind; }; +struct cpfl_repr { + struct cpfl_itf itf; + struct cpfl_repr_id repr_id; + struct rte_ether_addr mac_addr; + struct cpfl_vport_info *vport_info; + bool func_up; /* If the represented function is up */ +}; + +struct cpfl_metadata_chunk { + int type; + uint8_t data[CPFL_META_CHUNK_LENGTH]; +}; + +/** + * It is driver's responsibility to simlulate a metadata buffer which + * can be used as data source to fill the key of a 
flow rule. + */ +struct cpfl_metadata { + int length; + struct cpfl_metadata_chunk chunks[CPFL_META_LENGTH]; +}; + struct cpfl_adapter_ext { TAILQ_ENTRY(cpfl_adapter_ext) next; struct idpf_adapter base; @@ -115,13 +212,131 @@ struct cpfl_adapter_ext { uint16_t cur_vport_nb; uint16_t used_vecs_num; + struct cpfl_devargs devargs; + + rte_spinlock_t vport_map_lock; + struct rte_hash *vport_map_hash; + + rte_spinlock_t repr_lock; + struct rte_hash *repr_allowlist_hash; + + struct cpfl_flow_js_parser *flow_parser; + struct rte_bitmap *mod_bm; + void *mod_bm_mem; + + struct cpfl_metadata meta; + + /* ctrl vport and ctrl queues. */ + struct cpfl_vport ctrl_vport; + uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE]; + struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM]; + struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM]; }; TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext); +int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter, + struct cpfl_vport_id *vport_identity, + struct cpchnl2_event_vport_created *vport); +int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter, + struct cpfl_vport_id *vi, + struct cpchnl2_get_vport_list_response *response); +int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter, + struct cpchnl2_vport_id *vport_id, + struct cpfl_vport_id *vi, + struct cpchnl2_get_vport_info_response *response); +int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter); +int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter); +int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter); +int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, + uint32_t size, int batch_size); + #define CPFL_DEV_TO_PCI(eth_dev) \ RTE_DEV_TO_PCI((eth_dev)->device) #define CPFL_ADAPTER_TO_EXT(p) \ container_of((p), struct cpfl_adapter_ext, base) +#define CPFL_DEV_TO_VPORT(dev) \ + ((struct cpfl_vport *)((dev)->data->dev_private)) +#define CPFL_DEV_TO_REPR(dev) \ + ((struct cpfl_repr *)((dev)->data->dev_private)) +#define CPFL_DEV_TO_ITF(dev) \ + ((struct cpfl_itf *)((dev)->data->dev_private)) + +static inline uint16_t +cpfl_get_port_id(struct cpfl_itf *itf) +{ + if (!itf) + return CPFL_INVALID_HW_ID; + + if (itf->type == CPFL_ITF_TYPE_VPORT) { + struct cpfl_vport *vport = (void *)itf; + + return vport->base.devarg_id; + } + + return CPFL_INVALID_HW_ID; +} + +static inline uint16_t +cpfl_get_vsi_id(struct cpfl_itf *itf) +{ + struct cpfl_adapter_ext *adapter = itf->adapter; + struct cpfl_vport_info *info; + uint32_t vport_id; + int ret; + struct cpfl_vport_id vport_identity; + + if (!itf) + return CPFL_INVALID_HW_ID; + + if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) { + struct cpfl_repr *repr = (void *)itf; + + return repr->vport_info->vport.info.vsi_id; + } else if (itf->type == CPFL_ITF_TYPE_VPORT) { + vport_id = ((struct cpfl_vport *)itf)->base.vport_id; + + vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF; + /* host: CPFL_HOST0_CPF_ID, acc: CPFL_ACC_CPF_ID */ + vport_identity.pf_id = CPFL_ACC_CPF_ID; + vport_identity.vf_id = 0; + vport_identity.vport_id = vport_id; + ret = rte_hash_lookup_data(adapter->vport_map_hash, + &vport_identity, + (void **)&info); + if (ret < 0) { + PMD_DRV_LOG(ERR, "vport id not exist"); + goto err; + } + + return info->vport.info.vsi_id; + } + +err: + return CPFL_INVALID_HW_ID; +} + +static inline struct cpfl_itf * +cpfl_get_itf_by_port_id(uint16_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= RTE_MAX_ETHPORTS) { + PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS); + return NULL; + } + + dev = 
&rte_eth_devices[port_id]; + if (dev->state == RTE_ETH_DEV_UNUSED) { + PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id); + return NULL; + } + + if (!dev->data) { + PMD_DRV_LOG(ERR, "eth_dev[%d] data not be allocated.", port_id); + return NULL; + } + return CPFL_DEV_TO_ITF(dev); +} #endif /* _CPFL_ETHDEV_H_ */ diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c new file mode 100644 index 00000000000..4c9e375cca0 --- /dev/null +++ b/drivers/net/cpfl/cpfl_flow.c @@ -0,0 +1,339 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ +#include +#include + +#include "cpfl_flow.h" +#include "cpfl_flow_parser.h" + +TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine); + +static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list); + +void +cpfl_flow_engine_register(struct cpfl_flow_engine *engine) +{ + TAILQ_INSERT_TAIL(&engine_list, engine, node); +} + +struct cpfl_flow_engine * +cpfl_flow_engine_match(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta) +{ + struct cpfl_flow_engine *engine = NULL; + void *temp; + + RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (!engine->parse_pattern_action) + continue; + + if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0) + continue; + return engine; + } + + return NULL; +} + +int +cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_flow_engine *engine = NULL; + void *temp; + int ret; + + RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (!engine->init) { + PMD_INIT_LOG(ERR, "Invalid engine type (%d)", + engine->type); + return -ENOTSUP; + } + + ret = engine->init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to initialize engine %d", + engine->type); + return ret; + } + } + + return 0; +} + +void +cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_flow_engine *engine = NULL; + void *temp; + + RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (engine->uninit) + engine->uninit(adapter); + } +} + +static int +cpfl_flow_attr_valid(const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (attr->priority > CPFL_PREC_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Only support priority 0-7."); + return -rte_errno; + } + + return 0; +} + +static int +cpfl_flow_param_valid(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + ret = cpfl_flow_attr_valid(attr, error); + if (ret) + return ret; + + if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + return 0; +} + +static int +__cpfl_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct cpfl_flow_engine **engine, + struct rte_flow_error *error) +{ + int ret; + + ret = cpfl_flow_param_valid(attr, pattern, actions, error); + if (ret) + 
return ret; + + *engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta); + if (!*engine) { + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "No matched engine."); + return -rte_errno; + } + + return 0; +} + +int +cpfl_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct cpfl_flow_engine *engine = NULL; + int ret; + + ret = __cpfl_flow_validate(dev, attr, pattern, actions, NULL, &engine, error); + + return ret; +} + +struct rte_flow * +cpfl_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + struct cpfl_flow_engine *engine = NULL; + struct rte_flow *flow; + void *meta; + int ret; + + flow = rte_malloc(NULL, sizeof(struct rte_flow), 0); + if (!flow) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory"); + return NULL; + } + + ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error); + if (ret) { + rte_free(flow); + return NULL; + } + + if (!engine->create) { + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "No matched flow creation function"); + rte_free(flow); + return NULL; + } + + ret = engine->create(dev, flow, meta, error); + if (ret) { + rte_free(flow); + return NULL; + } + + flow->engine = engine; + TAILQ_INSERT_TAIL(&itf->flow_list, flow, next); + + return flow; +} + +int +cpfl_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + int ret = 0; + + if (!flow || !flow->engine || !flow->engine->destroy) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Invalid flow"); + return -rte_errno; + } + + ret = flow->engine->destroy(dev, flow, error); + if (!ret) + TAILQ_REMOVE(&itf->flow_list, flow, next); + else + PMD_DRV_LOG(ERR, "Failed to destroy flow"); + + return ret; +} + +int +cpfl_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + struct rte_flow *p_flow; + void *temp; + int ret = 0; + + RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) { + ret = cpfl_flow_destroy(dev, p_flow, error); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to flush flows"); + return -EINVAL; + } + } + + return ret; +} + +int +cpfl_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error) +{ + struct rte_flow_query_count *count = data; + int ret = -EINVAL; + + if (!flow || !flow->engine || !flow->engine->query_count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Invalid flow"); + return -rte_errno; + } + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow->engine->query_count(dev, flow, count, error); + break; + default: + ret = rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + break; + } + } + + return ret; +} + +const struct rte_flow_ops cpfl_flow_ops = { + .validate = cpfl_flow_validate, + .create = cpfl_flow_create, + .destroy = cpfl_flow_destroy, + 
.flush = cpfl_flow_flush, + .query = cpfl_flow_query, +}; + +int +cpfl_flow_init(struct cpfl_adapter_ext *ad) +{ + int ret; + + if (ad->devargs.flow_parser[0] == '\0') { + PMD_INIT_LOG(WARNING, "flow module is not initialized"); + return 0; + } + + ret = cpfl_flow_engine_init(ad); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to init flow engines"); + goto err; + } + + ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to create flow parser"); + goto err; + } + + return ret; + +err: + cpfl_flow_engine_uninit(ad); + return ret; +} + +void +cpfl_flow_uninit(struct cpfl_adapter_ext *ad) +{ + if (ad->devargs.flow_parser[0] == '\0') + return; + + cpfl_parser_destroy(ad->flow_parser); + cpfl_flow_engine_uninit(ad); +} diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h new file mode 100644 index 00000000000..22c0cb4e659 --- /dev/null +++ b/drivers/net/cpfl/cpfl_flow.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#ifndef _CPFL_FLOW_H_ +#define _CPFL_FLOW_H_ + +#include +#include "cpfl_ethdev.h" + +#define CPFL_PREC_MAX 7 + +extern const struct rte_flow_ops cpfl_flow_ops; + +enum cpfl_flow_engine_type { + CPFL_FLOW_ENGINE_NONE = 0, + CPFL_FLOW_ENGINE_FXP, +}; + +typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad); +typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad); +typedef int (*engine_create_t)(struct rte_eth_dev *dev, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error); +typedef int (*engine_destroy_t)(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error); +typedef int (*engine_query_t)(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_query_count *count, + struct rte_flow_error *error); +typedef void (*engine_free_t) (struct rte_flow *flow); +typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta); + +struct cpfl_flow_engine { + TAILQ_ENTRY(cpfl_flow_engine) node; + enum cpfl_flow_engine_type type; + engine_init_t init; + engine_uninit_t uninit; + engine_create_t create; + engine_destroy_t destroy; + engine_query_t query_count; + engine_free_t free; + engine_parse_pattern_action_t parse_pattern_action; +}; + +struct rte_flow { + TAILQ_ENTRY(rte_flow) next; + struct cpfl_flow_engine *engine; + void *rule; +}; + +void cpfl_flow_engine_register(struct cpfl_flow_engine *engine); +struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta); +int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter); +void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter); +int cpfl_flow_init(struct cpfl_adapter_ext *ad); +void cpfl_flow_uninit(struct cpfl_adapter_ext *ad); +struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +int cpfl_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error); +int cpfl_flow_flush(struct 
rte_eth_dev *dev, struct rte_flow_error *error); +int cpfl_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error); +#endif diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c new file mode 100644 index 00000000000..ddede2f5531 --- /dev/null +++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c @@ -0,0 +1,666 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2022 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cpfl_rules.h" +#include "cpfl_logs.h" +#include "cpfl_ethdev.h" +#include "cpfl_flow.h" +#include "cpfl_fxp_rule.h" +#include "cpfl_flow_parser.h" + +#define CPFL_COOKIE_DEF 0x1000 +#define CPFL_MOD_COOKIE_DEF 0x1237561 +#define CPFL_PREC_DEF 1 +#define CPFL_PREC_SET 5 +#define CPFL_TYPE_ID 3 +#define CPFL_OFFSET 0x0a +#define CPFL_HOST_ID_DEF 0 +#define CPFL_PF_NUM_DEF 0 +#define CPFL_PORT_NUM_DEF 0 +#define CPFL_RESP_REQ_DEF 2 +#define CPFL_PIN_TO_CACHE_DEF 0 +#define CPFL_CLEAR_MIRROR_1ST_STATE_DEF 0 +#define CPFL_FIXED_FETCH_DEF 0 +#define CPFL_PTI_DEF 0 +#define CPFL_MOD_OBJ_SIZE_DEF 0 +#define CPFL_PIN_MOD_CONTENT_DEF 0 + +#define CPFL_MAX_MOD_CONTENT_INDEX 256 +#define CPFL_MAX_MR_ACTION_NUM 8 + +/* Struct used when parse detailed rule information with json file */ +struct cpfl_rule_info_meta { + struct cpfl_flow_pr_action pr_action; /* json action field of pattern rule */ + uint32_t pr_num; /* number of pattern rules */ + uint32_t mr_num; /* number of modification rules */ + uint32_t rule_num; /* number of all rules */ + struct cpfl_rule_info rules[0]; +}; + +static uint32_t cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad); +static void cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx); +uint64_t cpfl_rule_cookie = CPFL_COOKIE_DEF; + +static int +cpfl_fxp_create(struct rte_eth_dev *dev, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error) +{ + int ret = 0; + uint32_t cpq_id = 0; + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + struct cpfl_adapter_ext *ad = itf->adapter; + struct cpfl_rule_info_meta *rim = meta; + struct cpfl_vport *vport; + struct cpfl_repr *repr; + + if (!rim) + return ret; + + if (itf->type == CPFL_ITF_TYPE_VPORT) { + vport = (struct cpfl_vport *)itf; + /* Every vport has one pair control queues configured to handle message. + * Even index is tx queue and odd index is rx queue. 
+ */ + cpq_id = vport->base.devarg_id * 2; + } else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) { + repr = (struct cpfl_repr *)itf; + cpq_id = ((repr->repr_id.pf_id + repr->repr_id.vf_id) & + (CPFL_TX_CFGQ_NUM - 1)) * 2; + } else { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "fail to find correct control queue"); + return -rte_errno; + } + + ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], + rim->rules, rim->rule_num, true); + if (ret < 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "cpfl filter create flow fail"); + rte_free(rim); + return ret; + } + + flow->rule = rim; + + return ret; +} + +static inline void +cpfl_fxp_rule_free(struct rte_flow *flow) +{ + rte_free(flow->rule); + flow->rule = NULL; +} + +static int +cpfl_fxp_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret = 0; + uint32_t cpq_id = 0; + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + struct cpfl_adapter_ext *ad = itf->adapter; + struct cpfl_rule_info_meta *rim; + uint32_t i; + struct cpfl_vport *vport; + struct cpfl_repr *repr; + + rim = flow->rule; + if (!rim) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "no such flow create by cpfl filter"); + + return -rte_errno; + } + + if (itf->type == CPFL_ITF_TYPE_VPORT) { + vport = (struct cpfl_vport *)itf; + cpq_id = vport->base.devarg_id * 2; + } else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) { + repr = (struct cpfl_repr *)itf; + cpq_id = ((repr->repr_id.pf_id + repr->repr_id.vf_id) & + (CPFL_TX_CFGQ_NUM - 1)) * 2; + } else { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "fail to find correct control queue"); + ret = -rte_errno; + goto err; + } + + ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules, + rim->rule_num, false); + if (ret < 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "fail to destroy cpfl filter rule"); + goto err; + } + + /* free mod index */ + for (i = rim->pr_num; i < rim->rule_num; i++) + cpfl_fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index); +err: + cpfl_fxp_rule_free(flow); + return ret; +} + +static bool +cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action, + struct cpfl_rule_info_meta *rim, + int i) +{ + if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_SEM) { + struct cpfl_rule_info *rinfo = &rim->rules[i]; + + rinfo->type = CPFL_RULE_TYPE_SEM; + rinfo->sem.prof_id = pr_action->sem.prof; + rinfo->sem.sub_prof_id = pr_action->sem.subprof; + rinfo->sem.key_byte_len = pr_action->sem.keysize; + memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len); + rinfo->sem.pin_to_cache = CPFL_PIN_TO_CACHE_DEF; + rinfo->sem.fixed_fetch = CPFL_FIXED_FETCH_DEF; + } else { + PMD_DRV_LOG(ERR, "Invalid pattern item."); + return false; + } + + return true; +} + +static int +cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter, + struct cpfl_rule_info *match_rinfo, + struct cpfl_rule_info *mod_rinfo, + const struct cpfl_flow_mr_action *mr_action) +{ + struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod; + uint32_t mod_idx; + int i; + int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set)); + union cpfl_action_set *act_set = + &((union cpfl_action_set *)match_rinfo->act_bytes)[next]; + + if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD) + return -EINVAL; + + *act_set = cpfl_act_mod_profile(CPFL_PREC_DEF, + mr_action->mod.prof, + CPFL_PTI_DEF, + 0, /* append */ 
+ 0, /* prepend */ + CPFL_ACT_MOD_PROFILE_PREFETCH_256B); + + act_set++; + match_rinfo->act_byte_len += sizeof(union cpfl_action_set); + + mod_idx = cpfl_fxp_mod_idx_alloc(adapter); + if (mod_idx == CPFL_MAX_MOD_CONTENT_INDEX) { + PMD_DRV_LOG(ERR, "Out of Mod Index."); + return -ENOMEM; + } + + *act_set = cpfl_act_mod_addr(CPFL_PREC_DEF, mod_idx); + + act_set++; + match_rinfo->act_byte_len += sizeof(union cpfl_action_set); + + mod_rinfo->type = CPFL_RULE_TYPE_MOD; + minfo->mod_obj_size = CPFL_MOD_OBJ_SIZE_DEF; + minfo->pin_mod_content = CPFL_PIN_MOD_CONTENT_DEF; + minfo->mod_index = mod_idx; + mod_rinfo->cookie = CPFL_MOD_COOKIE_DEF; + mod_rinfo->port_num = CPFL_PORT_NUM_DEF; + mod_rinfo->resp_req = CPFL_RESP_REQ_DEF; + + minfo->mod_content_byte_len = mr_action->mod.byte_len + 2; + for (i = 0; i < minfo->mod_content_byte_len; i++) + minfo->mod_content[i] = mr_action->mod.data[i]; + + return 0; +} + +#define CPFL_FXP_MAX_QREGION_SIZE 128 +#define CPFL_INVALID_QUEUE_ID -2 +static int +cpfl_fxp_parse_action(struct cpfl_itf *itf, + const struct rte_flow_action *actions, + const struct cpfl_flow_mr_action *mr_action, + struct cpfl_rule_info_meta *rim, + int priority, + int index) +{ + const struct rte_flow_action_ethdev *act_ethdev; + const struct rte_flow_action *action; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_rss *rss; + struct rte_eth_dev_data *data; + enum rte_flow_action_type action_type; + struct cpfl_vport *vport; + /* used when action is PORT_REPRESENTOR type */ + struct cpfl_itf *dst_itf; + uint16_t dev_id; /* vsi id */ + int queue_id = -1; + bool fwd_vsi = false; + bool fwd_q = false; + bool is_vsi; + uint32_t i; + struct cpfl_rule_info *rinfo = &rim->rules[index]; + union cpfl_action_set *act_set = (void *)rinfo->act_bytes; + + priority = CPFL_PREC_MAX - priority; + for (action = actions; action->type != + RTE_FLOW_ACTION_TYPE_END; action++) { + action_type = action->type; + switch (action_type) { + case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: + case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: + if (!fwd_vsi) + fwd_vsi = true; + else + goto err; + + act_ethdev = action->conf; + dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id); + + if (!dst_itf) + goto err; + + if (dst_itf->type == CPFL_ITF_TYPE_VPORT) { + vport = (struct cpfl_vport *)dst_itf; + queue_id = vport->base.chunks_info.rx_start_qid; + } else { + queue_id = CPFL_INVALID_QUEUE_ID; + } + + is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR || + dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR); + if (is_vsi) + dev_id = cpfl_get_vsi_id(dst_itf); + else + dev_id = cpfl_get_port_id(dst_itf); + + if (dev_id == CPFL_INVALID_HW_ID) + goto err; + + if (is_vsi) + *act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id); + else + *act_set = cpfl_act_fwd_port(0, priority, 0, dev_id); + act_set++; + rinfo->act_byte_len += sizeof(union cpfl_action_set); + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + if (!fwd_q) + fwd_q = true; + else + goto err; + if (queue_id == CPFL_INVALID_QUEUE_ID) + goto err; + act_q = action->conf; + data = itf->data; + if (act_q->index >= data->nb_rx_queues) + goto err; + + vport = (struct cpfl_vport *)itf; + if (queue_id < 0) + queue_id = vport->base.chunks_info.rx_start_qid; + queue_id += act_q->index; + *act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0); + act_set++; + rinfo->act_byte_len += sizeof(union cpfl_action_set); + break; + case RTE_FLOW_ACTION_TYPE_RSS: + rss = action->conf; + if (rss->queue_num <= 1) + goto err; + for (i = 0; i < rss->queue_num - 1; i++) { 
+ if (rss->queue[i + 1] != rss->queue[i] + 1) + goto err; + } + data = itf->data; + if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues) + goto err; + if (!(rte_is_power_of_2(rss->queue_num) && + rss->queue_num <= CPFL_FXP_MAX_QREGION_SIZE)) + goto err; + + if (!fwd_q) + fwd_q = true; + else + goto err; + if (queue_id == CPFL_INVALID_QUEUE_ID) + goto err; + vport = (struct cpfl_vport *)itf; + if (queue_id < 0) + queue_id = vport->base.chunks_info.rx_start_qid; + queue_id += rss->queue[0]; + *act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id, + log(rss->queue_num) / log(2), 0); + act_set++; + rinfo->act_byte_len += sizeof(union cpfl_action_set); + break; + case RTE_FLOW_ACTION_TYPE_DROP: + (*act_set).data = cpfl_act_drop(priority).data; + act_set++; + rinfo->act_byte_len += sizeof(union cpfl_action_set); + (*act_set).data = cpfl_act_set_commit_mode(priority, 0).data; + act_set++; + rinfo->act_byte_len += sizeof(union cpfl_action_set); + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + break; + case RTE_FLOW_ACTION_TYPE_VOID: + break; + default: + goto err; + } + } + + if (mr_action) { + uint32_t i; + + for (i = 0; i < rim->mr_num; i++) + if (cpfl_parse_mod_content(itf->adapter, rinfo, + &rim->rules[rim->pr_num + i], + &mr_action[i])) + goto err; + } + + return 0; + +err: + PMD_DRV_LOG(ERR, "Invalid action type"); + return -EINVAL; +} + +static void +cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo) +{ + if (cpfl_rule_cookie == ~0llu) + cpfl_rule_cookie = CPFL_COOKIE_DEF; + rinfo->cookie = cpfl_rule_cookie++; + rinfo->host_id = CPFL_HOST_ID_DEF; + rinfo->port_num = CPFL_PORT_NUM_DEF; + rinfo->resp_req = CPFL_RESP_REQ_DEF; + rinfo->clear_mirror_1st_state = CPFL_CLEAR_MIRROR_1ST_STATE_DEF; +} + +static bool +cpfl_is_mod_action(const struct rte_flow_action actions[]) +{ + const struct rte_flow_action *action; + enum rte_flow_action_type action_type; + + if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) + return false; + + for (action = actions; action->type != + RTE_FLOW_ACTION_TYPE_END; action++) { + action_type = action->type; + switch (action_type) { + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + return true; + default: + continue; + } + } + return false; +} + +static bool +cpfl_fxp_get_metadata_port(struct cpfl_itf *itf, + const struct rte_flow_action actions[]) +{ + const struct rte_flow_action *action; + enum rte_flow_action_type action_type; + const struct rte_flow_action_ethdev *ethdev; + struct cpfl_itf *target_itf; + bool ret; + + if (itf->type == CPFL_ITF_TYPE_VPORT) { + ret = cpfl_metadata_write_port_id(itf); + if (!ret) { + PMD_DRV_LOG(ERR, "fail to write port id"); + return false; + } + } + + ret = cpfl_metadata_write_sourcevsi(itf); + if (!ret) { + PMD_DRV_LOG(ERR, "fail to write source vsi id"); + return false; + } + + ret = cpfl_metadata_write_vsi(itf); + if (!ret) { + PMD_DRV_LOG(ERR, "fail to write vsi id"); + return false; + } + + if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) + return false; + + for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) { + action_type = action->type; + switch (action_type) { + case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: + case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: + ethdev = (const struct rte_flow_action_ethdev *)action->conf; + target_itf = cpfl_get_itf_by_port_id(ethdev->port_id); + if (!target_itf) { + PMD_DRV_LOG(ERR, "fail to get target_itf by port id"); + return false; + } + ret = 
cpfl_metadata_write_targetvsi(target_itf); + if (!ret) { + PMD_DRV_LOG(ERR, "fail to write target vsi id"); + return false; + } + break; + default: + continue; + } + } + + return true; +} + +static int +cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta) +{ + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + struct cpfl_flow_pr_action pr_action = { 0 }; + struct cpfl_adapter_ext *adapter = itf->adapter; + struct cpfl_flow_mr_action mr_action[CPFL_MAX_MR_ACTION_NUM] = { 0 }; + uint32_t pr_num = 0; + uint32_t mr_num = 0; + struct cpfl_rule_info_meta *rim; + int ret; + + ret = cpfl_fxp_get_metadata_port(itf, actions); + if (!ret) { + PMD_DRV_LOG(ERR, "Fail to save metadata."); + return -EINVAL; + } + + ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action); + if (ret) { + PMD_DRV_LOG(ERR, "No Match pattern support."); + return -EINVAL; + } + + if (cpfl_is_mod_action(actions)) { + ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action); + if (ret) { + PMD_DRV_LOG(ERR, "action parse fails."); + return -EINVAL; + } + mr_num++; + } + + pr_num = 1; + rim = rte_zmalloc(NULL, + sizeof(struct cpfl_rule_info_meta) + + (pr_num + mr_num) * sizeof(struct cpfl_rule_info), + 0); + if (!rim) + return -ENOMEM; + + rim->pr_action = pr_action; + rim->pr_num = pr_num; + rim->mr_num = mr_num; + rim->rule_num = pr_num + mr_num; + + if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) { + PMD_DRV_LOG(ERR, "Invalid pattern"); + rte_free(rim); + return -rte_errno; + } + + if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority, 0)) { + PMD_DRV_LOG(ERR, "Invalid action"); + rte_free(rim); + return -rte_errno; + } + + cpfl_fill_rinfo_default_value(&rim->rules[0]); + + if (!meta) + rte_free(rim); + else + *meta = rim; + + return 0; +} + +static int +cpfl_fxp_mod_init(struct cpfl_adapter_ext *ad) +{ + uint32_t size = rte_bitmap_get_memory_footprint(CPFL_MAX_MOD_CONTENT_INDEX); + void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE); + + if (!mem) + return -ENOMEM; + + /* a set bit represent a free slot */ + ad->mod_bm = rte_bitmap_init_with_all_set(CPFL_MAX_MOD_CONTENT_INDEX, mem, size); + if (!ad->mod_bm) { + rte_free(mem); + return -EINVAL; + } + + ad->mod_bm_mem = mem; + + return 0; +} + +static void +cpfl_fxp_mod_uninit(struct cpfl_adapter_ext *ad) +{ + rte_free(ad->mod_bm_mem); + ad->mod_bm_mem = NULL; + ad->mod_bm = NULL; +} + +static uint32_t +cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad) +{ + uint64_t slab = 0; + uint32_t pos = 0; + + if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab)) + return CPFL_MAX_MOD_CONTENT_INDEX; + + pos += __builtin_ffsll(slab) - 1; + rte_bitmap_clear(ad->mod_bm, pos); + + return pos; +} + +static void +cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx) +{ + rte_bitmap_set(ad->mod_bm, idx); +} + +static int +cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused, + struct rte_flow_query_count *count __rte_unused, + struct rte_flow_error *error) +{ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "count action not supported by this module"); + + return -rte_errno; +} + +static void +cpfl_fxp_uninit(struct cpfl_adapter_ext *ad) +{ + cpfl_fxp_mod_uninit(ad); +} + +static int +cpfl_fxp_init(struct cpfl_adapter_ext *ad) +{ + int ret = 0; + + ret = cpfl_fxp_mod_init(ad); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to init mod content 
bitmap."); + return ret; + } + + return ret; +} + +static struct +cpfl_flow_engine cpfl_fxp_engine = { + .type = CPFL_FLOW_ENGINE_FXP, + .init = cpfl_fxp_init, + .uninit = cpfl_fxp_uninit, + .create = cpfl_fxp_create, + .destroy = cpfl_fxp_destroy, + .query_count = cpfl_fxp_query, + .parse_pattern_action = cpfl_fxp_parse_pattern_action, +}; + +RTE_INIT(cpfl_sw_engine_init) +{ + struct cpfl_flow_engine *engine = &cpfl_fxp_engine; + + cpfl_flow_engine_register(engine); +} diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c new file mode 100644 index 00000000000..0e623494a24 --- /dev/null +++ b/drivers/net/cpfl/cpfl_flow_parser.c @@ -0,0 +1,1835 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#include + +#include "cpfl_flow_parser.h" + +static enum rte_flow_item_type +cpfl_get_item_type_by_str(const char *type) +{ + if (strcmp(type, "eth") == 0) + return RTE_FLOW_ITEM_TYPE_ETH; + else if (strcmp(type, "ipv4") == 0) + return RTE_FLOW_ITEM_TYPE_IPV4; + else if (strcmp(type, "tcp") == 0) + return RTE_FLOW_ITEM_TYPE_TCP; + else if (strcmp(type, "udp") == 0) + return RTE_FLOW_ITEM_TYPE_UDP; + else if (strcmp(type, "vxlan") == 0) + return RTE_FLOW_ITEM_TYPE_VXLAN; + else if (strcmp(type, "icmp") == 0) + return RTE_FLOW_ITEM_TYPE_ICMP; + else if (strcmp(type, "vlan") == 0) + return RTE_FLOW_ITEM_TYPE_VLAN; + + PMD_DRV_LOG(ERR, "Not support this type: %s.", type); + return RTE_FLOW_ITEM_TYPE_VOID; +} + +static enum rte_flow_action_type +cpfl_get_action_type_by_str(const char *type) +{ + if (strcmp(type, "vxlan_encap") == 0) + return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP; + else if (strcmp(type, "vxlan_decap") == 0) + return RTE_FLOW_ACTION_TYPE_VXLAN_DECAP; + + PMD_DRV_LOG(ERR, "Not support this type: %s.", type); + return RTE_FLOW_ACTION_TYPE_VOID; +} + +static const char * +cpfl_json_t_to_string(json_t *object, const char *name) +{ + json_t *subobject; + + if (!object) { + PMD_DRV_LOG(ERR, "object doesn't exist."); + return NULL; + } + subobject = json_object_get(object, name); + if (!subobject) { + PMD_DRV_LOG(ERR, "%s doesn't exist.", name); + return NULL; + } + + return json_string_value(subobject); +} + +static int +cpfl_json_t_to_int(json_t *object, const char *name, int *value) +{ + json_t *subobject; + + if (!object) { + PMD_DRV_LOG(ERR, "object doesn't exist."); + return -EINVAL; + } + subobject = json_object_get(object, name); + if (!subobject) { + PMD_DRV_LOG(ERR, "%s doesn't exist.", name); + return -EINVAL; + } + if (!json_is_integer(subobject)) { + PMD_DRV_LOG(ERR, "%s is not an integer.", name); + return -EINVAL; + } + *value = (int)json_integer_value(subobject); + + return 0; +} + +static int +cpfl_json_t_to_uint16(json_t *object, const char *name, uint16_t *value) +{ + json_t *subobject; + + if (!object) { + PMD_DRV_LOG(ERR, "object doesn't exist."); + return -EINVAL; + } + subobject = json_object_get(object, name); + if (!subobject) { + PMD_DRV_LOG(ERR, "%s doesn't exist.", name); + return -EINVAL; + } + if (!json_is_integer(subobject)) { + PMD_DRV_LOG(ERR, "%s is not an integer.", name); + return -EINVAL; + } + *value = (uint16_t)json_integer_value(subobject); + + return 0; +} + +static int +cpfl_json_t_to_uint32(json_t *object, const char *name, uint32_t *value) +{ + json_t *subobject; + + if (!object) { + PMD_DRV_LOG(ERR, "object doesn't exist."); + return -EINVAL; + } + subobject = json_object_get(object, name); + if (!subobject) { + PMD_DRV_LOG(ERR, "%s doesn't exist.", name); + return -EINVAL; + } + if 
(!json_is_integer(subobject)) { + PMD_DRV_LOG(ERR, "%s is not an integer.", name); + return -EINVAL; + } + *value = (uint32_t)json_integer_value(subobject); + + return 0; +} + +static int +cpfl_flow_js_pattern_key_attr(json_t *ob_pr_key_attrs, struct cpfl_flow_js_pr *js_pr) +{ + int i, len; + struct cpfl_flow_js_pr_key_attr *attr; + + len = json_array_size(ob_pr_key_attrs); + js_pr->key.attributes = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_attr), 0); + if (!js_pr->key.attributes) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + js_pr->key.attr_size = len; + attr = js_pr->key.attributes; + for (i = 0; i < len; i++) { + json_t *object; + const char *name; + uint16_t value = 0; + int ret; + + object = json_array_get(ob_pr_key_attrs, i); + name = cpfl_json_t_to_string(object, "Name"); + if (!name) { + PMD_DRV_LOG(ERR, "Can not parse string 'Name'."); + goto err; + } + ret = cpfl_json_t_to_uint16(object, "Value", &value); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'value'."); + goto err; + } + if (strcmp(name, "ingress") == 0) { + attr->ingress = value; + } else if (strcmp(name, "egress") == 0) { + attr->egress = value; + } else { + /* TODO: more... */ + PMD_DRV_LOG(ERR, "Not support attr name: %s.", name); + goto err; + } + } + + return 0; +err: + rte_free(js_pr->key.attributes); + return -EINVAL; +} + +static int +cpfl_flow_js_pattern_key_proto_field(json_t *ob_fields, + struct cpfl_flow_js_pr_key_proto *js_field) +{ + int len, i; + + if (!ob_fields) + return 0; + len = json_array_size(ob_fields); + if (len == 0) + return 0; + js_field->fields_size = len; + js_field->fields = + rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len, 0); + if (!js_field->fields) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + for (i = 0; i < len; i++) { + json_t *object; + const char *name, *mask; + + object = json_array_get(ob_fields, i); + name = cpfl_json_t_to_string(object, "name"); + if (!name) { + PMD_DRV_LOG(ERR, "Can not parse string 'name'."); + goto err; + } + if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) { + PMD_DRV_LOG(ERR, "The 'name' is too long."); + goto err; + } + memcpy(js_field->fields[i].name, name, strlen(name)); + + if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH || + js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) { + mask = cpfl_json_t_to_string(object, "mask"); + if (!mask) { + PMD_DRV_LOG(ERR, "Can not parse string 'mask'."); + goto err; + } + if (strlen(mask) > CPFL_FLOW_JSON_STR_SIZE_MAX) { + PMD_DRV_LOG(ERR, "The 'mask' is too long."); + goto err; + } + memcpy(js_field->fields[i].mask, mask, strlen(mask)); + } else { + uint32_t mask_32b; + int ret; + + ret = cpfl_json_t_to_uint32(object, "mask", &mask_32b); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'."); + goto err; + } + js_field->fields[i].mask_32b = mask_32b; + } + } + + return 0; + +err: + rte_free(js_field->fields); + return -EINVAL; +} + +static int +cpfl_flow_js_pattern_key_proto(json_t *ob_pr_key_protos, struct cpfl_flow_js_pr *js_pr) +{ + int len, i, ret; + + len = json_array_size(ob_pr_key_protos); + if (len == 0) + return 0; + js_pr->key.proto_size = len; + js_pr->key.protocols = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto) * len, 0); + if (!js_pr->key.protocols) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + + for (i = 0; i < len; i++) { + json_t *object, *ob_fields; + const char *type; + enum rte_flow_item_type item_type; + + object = json_array_get(ob_pr_key_protos, i); + /* 
pr->key->proto->type */ + type = cpfl_json_t_to_string(object, "type"); + if (!type) { + PMD_DRV_LOG(ERR, "Can not parse string 'type'."); + goto err; + } + item_type = cpfl_get_item_type_by_str(type); + if (item_type == RTE_FLOW_ITEM_TYPE_VOID) + goto err; + js_pr->key.protocols[i].type = item_type; + /* pr->key->proto->fields */ + ob_fields = json_object_get(object, "fields"); + ret = cpfl_flow_js_pattern_key_proto_field(ob_fields, + &js_pr->key.protocols[i]); + if (ret < 0) + goto err; + } + + return 0; + +err: + rte_free(js_pr->key.protocols); + return -EINVAL; +} + +static int +cpfl_flow_js_pattern_act_fv_proto(json_t *ob_value, struct cpfl_flow_js_fv *js_fv) +{ + uint16_t layer = 0, offset = 0, mask = 0; + const char *header; + enum rte_flow_item_type type; + int ret; + + ret = cpfl_json_t_to_uint16(ob_value, "layer", &layer); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'value'."); + return -EINVAL; + } + + header = cpfl_json_t_to_string(ob_value, "header"); + if (!header) { + PMD_DRV_LOG(ERR, "Can not parse string 'header'."); + return -EINVAL; + } + ret = cpfl_json_t_to_uint16(ob_value, "offset", &offset); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'offset'."); + return -EINVAL; + } + ret = cpfl_json_t_to_uint16(ob_value, "mask", &mask); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'mask'."); + return -EINVAL; + } + type = cpfl_get_item_type_by_str(header); + if (type == RTE_FLOW_ITEM_TYPE_VOID) + return -EINVAL; + js_fv->proto.layer = layer; + js_fv->proto.offset = offset; + js_fv->proto.mask = mask; + js_fv->proto.header = type; + + return 0; +} + +static int +cpfl_flow_js_pattern_act_fv_metadata(json_t *ob_value, struct cpfl_flow_js_fv *js_fv) +{ + int ret; + + ret = cpfl_json_t_to_uint16(ob_value, "type", &js_fv->meta.type); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'size'."); + return ret; + } + ret = cpfl_json_t_to_uint16(ob_value, "offset", &js_fv->meta.offset); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'size'."); + return ret; + } + ret = cpfl_json_t_to_uint16(ob_value, "mask", &js_fv->meta.mask); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'size'."); + return ret; + } + + return 0; +} + +static int +cpfl_flow_js_pattern_act_fv(json_t *ob_fvs, struct cpfl_flow_js_pr_action *js_act) +{ + int len, i; + + len = json_array_size(ob_fvs); + if (len == 0) + return 0; + js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0); + if (!js_act->sem.fv) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + js_act->sem.fv_size = len; + for (i = 0; i < len; i++) { + struct cpfl_flow_js_fv *js_fv; + json_t *object, *ob_value; + uint16_t offset = 0; + const char *type; + int ret; + + js_fv = &js_act->sem.fv[i]; + object = json_array_get(ob_fvs, i); + ret = cpfl_json_t_to_uint16(object, "offset", &offset); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'offset'."); + goto err; + } + js_fv->offset = offset; + + type = cpfl_json_t_to_string(object, "type"); + if (!type) { + PMD_DRV_LOG(ERR, "Can not parse string 'type'."); + goto err; + } + ob_value = json_object_get(object, "value"); + if (strcmp(type, "immediate") == 0) { + js_fv->type = CPFL_FV_TYPE_IMMEDIATE; + js_fv->immediate = json_integer_value(ob_value); + } else if (strcmp(type, "metadata") == 0) { + js_fv->type = CPFL_FV_TYPE_METADATA; + cpfl_flow_js_pattern_act_fv_metadata(ob_value, js_fv); + } else if (strcmp(type, "protocol") == 0) { + js_fv->type = CPFL_FV_TYPE_PROTOCOL; + cpfl_flow_js_pattern_act_fv_proto(ob_value, js_fv); + } else { + 
PMD_DRV_LOG(ERR, "Not support this type: %s.", type); + goto err; + } + } + + return 0; + +err: + rte_free(js_act->sem.fv); + return -EINVAL; +} + +static int +cpfl_flow_js_pattern_per_act(json_t *ob_per_act, struct cpfl_flow_js_pr_action *js_act) +{ + const char *type; + int ret; + + /* pr->actions->type */ + type = cpfl_json_t_to_string(ob_per_act, "type"); + if (!type) { + PMD_DRV_LOG(ERR, "Can not parse string 'type'."); + return -EINVAL; + } + /* pr->actions->data */ + if (strcmp(type, "sem") == 0) { + json_t *ob_fvs, *ob_sem; + + js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM; + ob_sem = json_object_get(ob_per_act, "data"); + ret = cpfl_json_t_to_uint16(ob_sem, "profile", &js_act->sem.prof); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'profile'."); + return -EINVAL; + } + ret = cpfl_json_t_to_uint16(ob_sem, "subprofile", &js_act->sem.subprof); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'subprofile'."); + return -EINVAL; + } + ret = cpfl_json_t_to_uint16(ob_sem, "keysize", &js_act->sem.keysize); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'keysize'."); + return -EINVAL; + } + ob_fvs = json_object_get(ob_sem, "fieldvectors"); + ret = cpfl_flow_js_pattern_act_fv(ob_fvs, js_act); + if (ret < 0) + return ret; + } else { + PMD_DRV_LOG(ERR, "Not support this type: %s.", type); + return -EINVAL; + } + + return 0; +} + +static int +cpfl_flow_js_pattern_act(json_t *ob_pr_acts, struct cpfl_flow_js_pr *js_pr) +{ + int i, len, ret; + + len = json_array_size(ob_pr_acts); + if (len == 0) + return 0; + js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action) * len, 0); + if (!js_pr->actions) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + js_pr->actions_size = len; + for (i = 0; i < len; i++) { + struct cpfl_flow_js_pr_action *js_act; + json_t *object; + + object = json_array_get(ob_pr_acts, i); + js_act = &js_pr->actions[i]; + ret = cpfl_flow_js_pattern_per_act(object, js_act); + if (ret < 0) { + rte_free(js_pr->actions); + PMD_DRV_LOG(ERR, "Can not parse pattern action."); + return -EINVAL; + } + } + + return 0; +} + +/** + * The patterns object array defines a set of rules directing the PMD to match sequences of + * rte_flow protocol headers and translate them into profile/field vectors for each pipeline + * stage. This object is mandatory. 
+ */ +static int +cpfl_flow_js_pattern_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser) +{ + json_t *ob_prs; + int i, len; + + /* Pattern Rules */ + ob_prs = json_object_get(ob_root, "patterns"); + if (!ob_prs) { + PMD_DRV_LOG(ERR, "The patterns is mandatory."); + return -EINVAL; + } + + len = json_array_size(ob_prs); + if (len == 0) + return 0; + parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len, 0); + if (!parser->patterns) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + parser->pr_size = len; + for (i = 0; i < len; i++) { + json_t *object; + json_t *ob_pr_actions; + json_t *ob_pr_key; + json_t *ob_pr_key_protos; + json_t *ob_pr_key_attrs; + int ret; + + object = json_array_get(ob_prs, i); + /* pr->key */ + ob_pr_key = json_object_get(object, "key"); + /* pr->key->protocols */ + ob_pr_key_protos = json_object_get(ob_pr_key, "protocols"); + ret = cpfl_flow_js_pattern_key_proto(ob_pr_key_protos, &parser->patterns[i]); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse key->protocols."); + goto err; + } + /* pr->key->attributes */ + ob_pr_key_attrs = json_object_get(ob_pr_key, "attributes"); + ret = cpfl_flow_js_pattern_key_attr(ob_pr_key_attrs, &parser->patterns[i]); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse key->attributes."); + goto err; + } + /* pr->actions */ + ob_pr_actions = json_object_get(object, "actions"); + ret = cpfl_flow_js_pattern_act(ob_pr_actions, &parser->patterns[i]); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse pattern action."); + goto err; + } + } + + return 0; + +err: + rte_free(parser->patterns); + return -EINVAL; +} + +static int +cpfl_flow_js_mr_key(json_t *ob_mr_keys, struct cpfl_flow_js_mr_key *js_mr_key) +{ + int len, i; + + len = json_array_size(ob_mr_keys); + if (len == 0) + return 0; + js_mr_key->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_key_action) * len, 0); + if (!js_mr_key->actions) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + js_mr_key->actions_size = len; + for (i = 0; i < len; i++) { + json_t *object, *ob_data; + const char *type; + enum rte_flow_action_type act_type; + + object = json_array_get(ob_mr_keys, i); + /* mr->key->actions->type */ + type = cpfl_json_t_to_string(object, "type"); + if (!type) { + PMD_DRV_LOG(ERR, "Can not parse string 'type'."); + goto err; + } + act_type = cpfl_get_action_type_by_str(type); + if (act_type == RTE_FLOW_ACTION_TYPE_VOID) + goto err; + js_mr_key->actions[i].type = act_type; + /* mr->key->actions->data */ + ob_data = json_object_get(object, "data"); + if (js_mr_key->actions[i].type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) { + json_t *ob_protos; + int proto_size, j; + struct cpfl_flow_js_mr_key_action_vxlan_encap *encap; + + ob_protos = json_object_get(ob_data, "protocols"); + encap = &js_mr_key->actions[i].encap; + if (!ob_protos) { + encap->proto_size = 0; + continue; + } + proto_size = json_array_size(ob_protos); + encap->proto_size = proto_size; + for (j = 0; j < proto_size; j++) { + const char *s; + json_t *subobject; + enum rte_flow_item_type proto_type; + + subobject = json_array_get(ob_protos, j); + s = json_string_value(subobject); + proto_type = cpfl_get_item_type_by_str(s); + if (proto_type == RTE_FLOW_ITEM_TYPE_VOID) { + PMD_DRV_LOG(ERR, "parse VXLAN_ENCAP failed."); + goto err; + } + encap->protocols[j] = proto_type; + } + } else if (js_mr_key->actions[i].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) { + PMD_DRV_LOG(ERR, "not support this type: %d.", js_mr_key->actions[i].type); + goto err; + } + 
} + + return 0; + +err: + rte_free(js_mr_key->actions); + return -EINVAL; +} + +static int +cpfl_flow_js_mr_layout(json_t *ob_layouts, struct cpfl_flow_js_mr_action_mod *js_mod) +{ + int len, i; + + len = json_array_size(ob_layouts); + js_mod->layout_size = len; + if (len == 0) + return 0; + js_mod->layout = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_layout) * len, 0); + if (!js_mod->layout) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + + for (i = 0; i < len; i++) { + json_t *object; + int index = 0, size = 0, offset = 0; + int ret; + const char *hint; + + object = json_array_get(ob_layouts, i); + ret = cpfl_json_t_to_int(object, "index", &index); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'index'."); + goto err; + } + js_mod->layout[i].index = index; + ret = cpfl_json_t_to_int(object, "size", &size); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'size'."); + goto err; + } + js_mod->layout[i].size = size; + ret = cpfl_json_t_to_int(object, "offset", &offset); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'offset'."); + goto err; + } + js_mod->layout[i].offset = offset; + hint = cpfl_json_t_to_string(object, "hint"); + if (!hint) { + PMD_DRV_LOG(ERR, "Can not parse string 'hint'."); + goto err; + } + memcpy(js_mod->layout[i].hint, hint, strlen(hint)); + } + + return 0; + +err: + rte_free(js_mod->layout); + return -EINVAL; +} + +static int +cpfl_flow_js_mr_action(json_t *ob_mr_act, struct cpfl_flow_js_mr_action *js_mr_act) +{ + json_t *ob_data; + const char *type; + + /* mr->action->type */ + type = cpfl_json_t_to_string(ob_mr_act, "type"); + if (!type) { + PMD_DRV_LOG(ERR, "Can not parse string 'type'."); + return -EINVAL; + } + /* mr->action->data */ + ob_data = json_object_get(ob_mr_act, "data"); + if (strcmp(type, "mod") == 0) { + json_t *ob_layouts; + uint16_t profile = 0; + int ret; + + js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD; + ret = cpfl_json_t_to_uint16(ob_data, "profile", &profile); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse 'profile'."); + return -EINVAL; + } + js_mr_act->mod.prof = profile; + ob_layouts = json_object_get(ob_data, "layout"); + ret = cpfl_flow_js_mr_layout(ob_layouts, &js_mr_act->mod); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse layout."); + return ret; + } + } else { + PMD_DRV_LOG(ERR, "not support this type: %s.", type); + return -EINVAL; + } + + return 0; +} + +/** + * The modifications object array defines a set of rules for the PMD to match rte_flow + * modification actions and translate them into the Modification profile. This object + * is optional. 
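+ * An illustrative entry, with key names taken from the parsing code below and
+ * placeholder values, might look like:
+ *   {"modifications": [{
+ *       "key": {"actions": [{"type": "vxlan_encap",
+ *                            "data": {"protocols": ["eth", "ipv4", "udp", "vxlan"]}}]},
+ *       "action": {"type": "mod",
+ *                  "data": {"profile": 1,
+ *                           "layout": [{"index": 0, "size": 14, "offset": 0, "hint": "eth"}]}}
+ *   }]}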
+ */ +static int +cpfl_flow_js_mod_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser) +{ + json_t *ob_mrs; + int i, len; + + ob_mrs = json_object_get(ob_root, "modifications"); + if (!ob_mrs) { + PMD_DRV_LOG(INFO, "The modifications is optional."); + return 0; + } + len = json_array_size(ob_mrs); + if (len == 0) + return 0; + parser->mr_size = len; + parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr) * len, 0); + if (!parser->modifications) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + for (i = 0; i < len; i++) { + int ret; + json_t *object, *ob_mr_key, *ob_mr_action, *ob_mr_key_action; + + object = json_array_get(ob_mrs, i); + /* mr->key */ + ob_mr_key = json_object_get(object, "key"); + /* mr->key->actions */ + ob_mr_key_action = json_object_get(ob_mr_key, "actions"); + ret = cpfl_flow_js_mr_key(ob_mr_key_action, &parser->modifications[i].key); + if (ret < 0) { + PMD_DRV_LOG(ERR, "parse mr_key failed."); + goto err; + } + /* mr->action */ + ob_mr_action = json_object_get(object, "action"); + ret = cpfl_flow_js_mr_action(ob_mr_action, &parser->modifications[i].action); + if (ret < 0) { + PMD_DRV_LOG(ERR, "parse mr_action failed."); + goto err; + } + } + + return 0; + +err: + rte_free(parser->modifications); + return -EINVAL; +} + +static int +cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser) +{ + int ret = 0; + + ret = cpfl_flow_js_pattern_rule(ob_root, parser); + if (ret < 0) { + PMD_DRV_LOG(ERR, "parse pattern_rule failed."); + return ret; + } + ret = cpfl_flow_js_mod_rule(ob_root, parser); + if (ret < 0) { + PMD_DRV_LOG(ERR, "parse mod_rule failed."); + return ret; + } + + return 0; +} + +int +cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char *filename) +{ + struct cpfl_flow_js_parser *parser; + json_error_t json_error; + json_t *root; + int ret; + + parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser), 0); + if (!parser) { + PMD_DRV_LOG(ERR, "Not enough memory to create flow parser."); + return -ENOMEM; + } + root = json_load_file(filename, 0, &json_error); + if (!root) { + PMD_DRV_LOG(ERR, "Bad JSON file \"%s\": %s", filename, json_error.text); + goto free_parser; + } + ret = cpfl_parser_init(root, parser); + if (ret < 0) { + PMD_DRV_LOG(ERR, "parser init failed."); + goto free_parser; + } + *flow_parser = parser; + json_decref(root); + + return 0; +free_parser: + rte_free(parser); + return -EINVAL; +} + +static void +cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act) +{ + if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM) + rte_free(pr_act->sem.fv); +} + +int +cpfl_parser_destroy(struct cpfl_flow_js_parser *parser) +{ + int i, j; + + if (!parser) + return 0; + + for (i = 0; i < parser->pr_size; i++) { + struct cpfl_flow_js_pr *pattern = &parser->patterns[i]; + + if (!pattern) + continue; + for (j = 0; j < pattern->key.proto_size; j++) + rte_free(pattern->key.protocols[j].fields); + rte_free(pattern->key.protocols); + rte_free(pattern->key.attributes); + + for (j = 0; j < pattern->actions_size; j++) { + struct cpfl_flow_js_pr_action *pr_act; + + pr_act = &pattern->actions[j]; + cpfl_parser_free_pr_action(pr_act); + } + rte_free(pattern->actions); + } + rte_free(parser->patterns); + for (i = 0; i < parser->mr_size; i++) { + struct cpfl_flow_js_mr *mr = &parser->modifications[i]; + + if (!mr) + continue; + rte_free(mr->key.actions); + rte_free(mr->action.mod.layout); + } + rte_free(parser->modifications); + rte_free(parser); + + return 0; +} + +static int 
+cpfl_get_items_length(const struct rte_flow_item *items) +{ + int length = 0; + const struct rte_flow_item *item = items; + + while ((item + length++)->type != RTE_FLOW_ITEM_TYPE_END) + continue; + return length; +} + +static int +cpfl_get_actions_length(const struct rte_flow_action *actions) +{ + int length = 0; + const struct rte_flow_action *action = actions; + + while ((action + length++)->type != RTE_FLOW_ACTION_TYPE_END) + continue; + return length; +} + +static int +cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items, + uint16_t offset, uint8_t *fv) +{ + uint16_t v_layer, v_offset, v_mask; + enum rte_flow_item_type v_header; + int j, layer, length; + uint16_t temp_fv; + + length = cpfl_get_items_length(items); + v_layer = js_fv->proto.layer; + v_header = js_fv->proto.header; + v_offset = js_fv->proto.offset; + v_mask = js_fv->proto.mask; + layer = 0; + for (j = 0; j < length - 1; j++) { + if (items[j].type == v_header) { + if (layer == v_layer) { + /* copy out 16 bits from offset */ + const uint8_t *pointer; + + pointer = &(((const uint8_t *)(items[j].spec))[v_offset]); + temp_fv = ntohs((*((const uint16_t *)pointer)) & v_mask); + fv[2 * offset] = (uint8_t)(temp_fv >> 8); + fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff); + break; + } + layer++; + } + } + + return 0; +} + +static int +cpfl_parse_fieldvectors(struct cpfl_itf *itf, struct cpfl_flow_js_fv *js_fvs, int size, + uint8_t *fv, const struct rte_flow_item *items) +{ + int i, ret; + + for (i = 0; i < size; i++) { + uint16_t offset, temp_fv, value_int; + enum cpfl_flow_js_fv_type type; + struct cpfl_flow_js_fv *js_fv; + + js_fv = &js_fvs[i]; + offset = js_fv->offset; + type = js_fv->type; + if (type == CPFL_FV_TYPE_IMMEDIATE) { + value_int = js_fv->immediate; + temp_fv = (value_int << 8) & 0xff00; + fv[2 * offset] = (uint8_t)(temp_fv >> 8); + fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff); + } else if (type == CPFL_FV_TYPE_METADATA) { + uint16_t type, v_offset, mask; + + type = js_fv->meta.type; + v_offset = js_fv->meta.offset; + mask = js_fv->meta.mask; + temp_fv = cpfl_metadata_read16(&itf->adapter->meta, type, v_offset) & mask; + fv[2 * offset] = (uint8_t)(temp_fv & 0x00ff); + fv[2 * offset + 1] = (uint8_t)(temp_fv >> 8); + } else if (type == CPFL_FV_TYPE_PROTOCOL) { + ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv); + if (ret) + return ret; + } else { + PMD_DRV_LOG(DEBUG, "not support this type: %d.", type); + return -EINVAL; + } + } + + return 0; +} + +static int +cpfl_parse_pr_actions(struct cpfl_itf *itf, + struct cpfl_flow_js_pr_action *actions, + int size, + const struct rte_flow_item *items, + const struct rte_flow_attr *attr, + struct cpfl_flow_pr_action *pr_action) +{ + int i, ret; + + for (i = 0; i < size; i++) { + struct cpfl_flow_js_pr_action *pr_act; + enum cpfl_flow_pr_action_type type; + + pr_act = &actions[i]; + /* pr->actions->type */ + type = pr_act->type; + /* pr->actions->data */ + if (attr->group == 1 && type == CPFL_JS_PR_ACTION_TYPE_SEM) { + struct cpfl_flow_js_pr_action_sem *sem = &pr_act->sem; + + pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM; + pr_action->sem.prof = sem->prof; + pr_action->sem.subprof = sem->subprof; + pr_action->sem.keysize = sem->keysize; + memset(pr_action->sem.cpfl_flow_pr_fv, 0, + sizeof(pr_action->sem.cpfl_flow_pr_fv)); + ret = cpfl_parse_fieldvectors(itf, sem->fv, sem->fv_size, + pr_action->sem.cpfl_flow_pr_fv, items); + return ret; + } else if (attr->group > 4 || attr->group == 0) { + return -EPERM; + } + } + + return 0; +} + 
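Each SEM field vector entry above lands in a 16-bit slot of the key buffer at bytes fv[2 * offset] and fv[2 * offset + 1]. The standalone sketch below is illustrative only and not part of this patch (the helper name and sample values are invented for the example); it mirrors the protocol-sourced case handled by cpfl_parse_fv_protocol(): a 16-bit word is read from the matched item's spec, masked, and stored most-significant byte first.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative helper: pack one masked 16-bit header word into key slot 'slot',
 * most-significant byte first, as cpfl_parse_fv_protocol() does for protocol FVs.
 */
static void
pack_proto_fv(uint8_t *fv, uint16_t slot, const uint8_t *spec,
	      uint16_t hdr_offset, uint16_t mask)
{
	uint16_t be_word;
	uint16_t host_word;

	memcpy(&be_word, spec + hdr_offset, sizeof(be_word));
	host_word = ntohs(be_word & mask);
	fv[2 * slot] = (uint8_t)(host_word >> 8);
	fv[2 * slot + 1] = (uint8_t)(host_word & 0xff);
}

int
main(void)
{
	uint8_t eth_spec[14] = {0};	/* dummy Ethernet header spec */
	uint8_t key[64] = {0};

	eth_spec[12] = 0x08;		/* EtherType 0x0800 (IPv4) at byte offset 12 */
	eth_spec[13] = 0x00;
	pack_proto_fv(key, 0, eth_spec, 12, 0xffff);
	printf("key[0..1] = %02x %02x\n", key[0], key[1]);
	return 0;
}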
+static int +cpfl_check_eth_mask(const char *mask, struct rte_ether_addr addr) +{ + int i, ret; + struct rte_ether_addr mask_bytes; + + ret = rte_ether_unformat_addr(mask, &mask_bytes); + if (ret < 0) { + PMD_DRV_LOG(ERR, "translate mac address from string to rte_ether_addr failed."); + return -EINVAL; + } + /* validate eth mask addr if match */ + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) { + if (mask_bytes.addr_bytes[i] != addr.addr_bytes[i]) + return -EINVAL; + } + + return 0; +} + +static int +cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr) +{ + uint32_t out_addr; + + /* 0: success; -EINVAL: invalid; -ENOTSUP: fail */ + int ret = inet_pton(AF_INET, mask, &out_addr); + + if (ret < 0) + return -EINVAL; + /* validate ipv4 mask addr if match */ + if (out_addr != addr) + return -EINVAL; + + return 0; +} + +static int +cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_eth *eth_mask) +{ + int field_size, j; + int flag_dst_addr, flag_src_addr, flag_ether_type; + struct cpfl_flow_js_pr_key_proto_field *field; + + if (!proto) + return 0; + field_size = proto->fields_size; + if (field_size != 0 && !eth_mask) + return -EINVAL; + + if (field_size == 0 && eth_mask) + return -EINVAL; + + if (field_size == 0 && !eth_mask) + return 0; + + flag_dst_addr = false; + flag_src_addr = false; + flag_ether_type = false; + for (j = 0; j < field_size; j++) { + const char *name, *s_mask; + + field = &proto->fields[j]; + /* match: rte_flow_item_eth.dst, more see Field Mapping + */ + name = field->name; + /* match: rte_flow_item->mask */ + if (strcmp(name, "src_addr") == 0) { + s_mask = field->mask; + if (cpfl_check_eth_mask(s_mask, eth_mask->src) < 0) + return -EINVAL; + flag_src_addr = true; + } else if (strcmp(name, "dst_addr") == 0) { + s_mask = field->mask; + if (cpfl_check_eth_mask(s_mask, eth_mask->dst) < 0) + return -EINVAL; + flag_dst_addr = true; + } else if (strcmp(name, "ether_type") == 0) { + uint16_t mask = (uint16_t)field->mask_32b; + + if (mask != eth_mask->type) + return -EINVAL; + flag_ether_type = true; + } else { + /* TODO: more type... 
*/ + PMD_DRV_LOG(ERR, "not support this name."); + return -EINVAL; + } + } + if (!flag_src_addr) { + if (strcmp((const char *)eth_mask->src.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0) + return -EINVAL; + } + if (!flag_dst_addr) { + if (strcmp((const char *)eth_mask->dst.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0) + return -EINVAL; + } + if (!flag_ether_type) { + if (eth_mask->hdr.ether_type != (rte_be16_t)0) + return -EINVAL; + } + + return 0; +} + +static int +cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_ipv4 *ipv4_mask) +{ + int field_size, j; + int flag_next_proto_id, flag_src_addr, flag_dst_addr; + struct cpfl_flow_js_pr_key_proto_field *field; + + if (!proto) + return 0; + + field_size = proto->fields_size; + if (field_size != 0 && !ipv4_mask) + return -EINVAL; + + if (field_size == 0 && ipv4_mask) + return -EINVAL; + + if (field_size == 0 && !ipv4_mask) + return 0; + + flag_dst_addr = false; + flag_src_addr = false; + flag_next_proto_id = false; + for (j = 0; j < field_size; j++) { + const char *name; + + field = &proto->fields[j]; + name = field->name; + if (strcmp(name, "src_addr") == 0) { + const char *mask; + + mask = field->mask; + if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.src_addr) < 0) + return -EINVAL; + flag_src_addr = true; + } else if (strcmp(name, "dst_addr") == 0) { + const char *mask; + + mask = field->mask; + if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.dst_addr) < 0) + return -EINVAL; + flag_dst_addr = true; + } else if (strcmp(name, "next_proto_id") == 0) { + uint8_t mask; + + mask = (uint8_t)field->mask_32b; + if (mask != ipv4_mask->hdr.next_proto_id) + return -EINVAL; + flag_next_proto_id = true; + } else { + PMD_DRV_LOG(ERR, "not support this name."); + return -EINVAL; + } + } + if (!flag_src_addr) { + if (ipv4_mask->hdr.src_addr != (rte_be32_t)0) + return -EINVAL; + } + if (!flag_dst_addr) { + if (ipv4_mask->hdr.dst_addr != (rte_be32_t)0) + return -EINVAL; + } + if (!flag_next_proto_id) { + if (ipv4_mask->hdr.next_proto_id != (uint8_t)0) + return -EINVAL; + } + + return 0; +} + +static int +cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_tcp *tcp_mask) +{ + int field_size, j; + int flag_src_port, flag_dst_port; + struct cpfl_flow_js_pr_key_proto_field *field; + + if (!proto) + return 0; + + field_size = proto->fields_size; + if (field_size != 0 && !tcp_mask) + return -EINVAL; + + if (field_size == 0 && tcp_mask) + return -EINVAL; + + if (field_size == 0 && !tcp_mask) + return 0; + + flag_src_port = false; + flag_dst_port = false; + for (j = 0; j < field_size; j++) { + const char *name; + uint16_t mask; + + field = &proto->fields[j]; + name = field->name; + mask = (uint16_t)field->mask_32b; + if (strcmp(name, "src_port") == 0) { + if (tcp_mask->hdr.src_port != mask) + return -EINVAL; + flag_src_port = true; + } else if (strcmp(name, "dst_port") == 0) { + if (tcp_mask->hdr.dst_port != mask) + return -EINVAL; + flag_dst_port = true; + } else { + PMD_DRV_LOG(ERR, "not support this name."); + return -EINVAL; + } + } + if (!flag_src_port) { + if (tcp_mask->hdr.src_port != (rte_be16_t)0) + return -EINVAL; + } + if (!flag_dst_port) { + if (tcp_mask->hdr.dst_port != (rte_be16_t)0) + return -EINVAL; + } + + return 0; +} + +static int +cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_udp *udp_mask) +{ + int field_size, j; + bool flag_src_port, flag_dst_port; + struct cpfl_flow_js_pr_key_proto_field *field; + + if (!proto) + return 0; + field_size = 
proto->fields_size; + if (field_size != 0 && !udp_mask) + return -EINVAL; + if (field_size == 0 && udp_mask) + return -EINVAL; + if (field_size == 0 && !udp_mask) + return 0; + flag_src_port = false; + flag_dst_port = false; + for (j = 0; j < field_size; j++) { + const char *name; + uint16_t mask; + + field = &proto->fields[j]; + /* match: rte_flow_item_eth.dst */ + name = field->name; /* match: rte_flow_item->mask */ + mask = (uint16_t)field->mask_32b; + if (strcmp(name, "src_port") == 0) { + if (udp_mask->hdr.src_port != mask) + return -EINVAL; + flag_src_port = true; + } else if (strcmp(name, "dst_port") == 0) { + if (udp_mask->hdr.dst_port != mask) + return -EINVAL; + flag_dst_port = true; + } else { + PMD_DRV_LOG(ERR, "not support this name: %s.", name); + return -EINVAL; + } + } + if (!flag_src_port) { + if (udp_mask->hdr.src_port != (rte_be16_t)0) + return -EINVAL; + } + if (!flag_dst_port) { + if (udp_mask->hdr.dst_port != (rte_be16_t)0) + return -EINVAL; + } + + return 0; +} + +static int +cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto, + const struct rte_flow_item_vxlan *vxlan_mask) +{ + int field_size, j; + struct cpfl_flow_js_pr_key_proto_field *field; + + if (!proto) + return 0; + field_size = proto->fields_size; + if (field_size != 0 && !vxlan_mask) + return -EINVAL; + if (field_size == 0 && vxlan_mask) + return -EINVAL; + if (field_size == 0 && !vxlan_mask) + return 0; + for (j = 0; j < field_size; j++) { + const char *name; + int64_t mask; + + field = &proto->fields[j]; + name = field->name; + /* match: rte_flow_item->mask */ + mask = (int64_t)field->mask_32b; + if (strcmp(name, "vx_vni") == 0) { + if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) != mask) + return -EINVAL; + } else { + PMD_DRV_LOG(ERR, "not support this name."); + return -EINVAL; + } + } + + return 0; +} + +static int +cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_icmp *icmp_mask) +{ + int field_size; + + if (!proto) + return 0; + field_size = proto->fields_size; + if ((field_size != 0 && !icmp_mask) || (field_size == 0 && icmp_mask)) + return -EINVAL; + + return 0; +} + +static int +cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols, + int proto_size, + const struct rte_flow_item *items) +{ + int i, length; + int j = 0; + + length = cpfl_get_items_length(items); + if (proto_size > length - 1) + return -EINVAL; + for (i = 0; i < proto_size; i++) { + struct cpfl_flow_js_pr_key_proto *key_proto; + enum rte_flow_item_type type; + + key_proto = &protocols[i]; + /* pr->key->proto->type */ + type = key_proto->type; + /* pr->key->proto->fields */ + switch (type) { + case RTE_FLOW_ITEM_TYPE_ETH: + if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) { + const struct rte_flow_item_eth *eth_mask; + int ret; + + eth_mask = (const struct rte_flow_item_eth *)items[i].mask; + ret = cpfl_check_eth(key_proto, eth_mask); + if (ret < 0) + return ret; + } else { + return -EINVAL; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + if (items[j++].type == RTE_FLOW_ITEM_TYPE_IPV4) { + const struct rte_flow_item_ipv4 *ipv4_mask; + int ret; + + ipv4_mask = (const struct rte_flow_item_ipv4 *)items[i].mask; + ret = cpfl_check_ipv4(key_proto, ipv4_mask); + if (ret < 0) + return ret; + } else { + return -EINVAL; + } + break; + case RTE_FLOW_ITEM_TYPE_TCP: + if (items[j++].type == RTE_FLOW_ITEM_TYPE_TCP) { + const struct rte_flow_item_tcp *tcp_mask; + int ret; + + tcp_mask = (const struct rte_flow_item_tcp *)items[i].mask; + ret = cpfl_check_tcp(key_proto, tcp_mask); + if (ret < 
0) + return ret; + } else { + return -EINVAL; + } + break; + case RTE_FLOW_ITEM_TYPE_UDP: + if (items[j++].type == RTE_FLOW_ITEM_TYPE_UDP) { + const struct rte_flow_item_udp *udp_mask; + int ret; + + udp_mask = (const struct rte_flow_item_udp *)items[i].mask; + ret = cpfl_check_udp(key_proto, udp_mask); + if (ret < 0) + return ret; + } else { + return -EINVAL; + } + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + if (items[j++].type == RTE_FLOW_ITEM_TYPE_VXLAN) { + const struct rte_flow_item_vxlan *vxlan_mask; + int ret; + + vxlan_mask = (const struct rte_flow_item_vxlan *)items[i].mask; + ret = cpfl_check_vxlan(key_proto, vxlan_mask); + if (ret < 0) + return ret; + } else { + return -EINVAL; + } + break; + case RTE_FLOW_ITEM_TYPE_ICMP: + if (items[j++].type == RTE_FLOW_ITEM_TYPE_ICMP) { + const struct rte_flow_item_icmp *icmp_mask; + int ret; + + icmp_mask = (const struct rte_flow_item_icmp *)items[i].mask; + ret = cpfl_check_icmp(key_proto, icmp_mask); + if (ret < 0) + return ret; + } else { + return -EINVAL; + } + break; + default: + PMD_DRV_LOG(ERR, "Not support this type: %d.", type); + return -EPERM; + } + } + if (items[j].type != RTE_FLOW_ITEM_TYPE_END) + return -EINVAL; + + return 0; +} + +static int +cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr, + const struct rte_flow_attr *attr) +{ + if (key_attr->ingress != attr->ingress) { + PMD_DRV_LOG(DEBUG, "ingress not match."); + return -EINVAL; + } + if (key_attr->egress != attr->egress) { + PMD_DRV_LOG(DEBUG, "egress not match."); + return -EINVAL; + } + + return 0; +} + +static int +cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern, + const struct rte_flow_item *items, + const struct rte_flow_attr *attr) +{ + int ret; + + /* pr->key */ + /* pr->key->protocols */ + ret = cpfl_check_pattern_key_proto(pattern->key.protocols, + pattern->key.proto_size, items); + if (ret < 0) + return -EINVAL; + /* pr->key->attributes */ + ret = cpfl_check_pattern_key_attr(pattern->key.attributes, attr); + if (ret < 0) + return -EINVAL; + + return 0; +} + +/* output: struct cpfl_flow_pr_action* pr_action */ +int +cpfl_flow_parse_items(struct cpfl_itf *itf, + struct cpfl_flow_js_parser *parser, + const struct rte_flow_item *items, + const struct rte_flow_attr *attr, + struct cpfl_flow_pr_action *pr_action) +{ + int i, size; + struct cpfl_flow_js_pr *pattern; + + size = parser->pr_size; + for (i = 0; i < size; i++) { + int ret; + + pattern = &parser->patterns[i]; + ret = cpfl_check_pattern_key(pattern, items, attr); + if (ret < 0) + continue; + /* pr->actions */ + ret = cpfl_parse_pr_actions(itf, pattern->actions, pattern->actions_size, + items, attr, pr_action); + return ret; + } + + return -EINVAL; +} + +/* modifications rules */ +static int +cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap, + const struct rte_flow_action *action) +{ + const struct rte_flow_action_vxlan_encap *action_vxlan_encap; + struct rte_flow_item *definition; + int def_length, i, proto_size; + + action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf; + definition = action_vxlan_encap->definition; + def_length = cpfl_get_items_length(definition); + proto_size = encap->proto_size; + if (proto_size != def_length - 1) { + PMD_DRV_LOG(DEBUG, "protocols not match."); + return -EINVAL; + } + for (i = 0; i < proto_size; i++) { + enum rte_flow_item_type proto; + + proto = encap->protocols[i]; + if (proto == RTE_FLOW_ITEM_TYPE_VLAN) { + if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) { + PMD_DRV_LOG(DEBUG, 
"protocols not match."); + return -EINVAL; + } + } else if (proto != definition[i].type) { + PMD_DRV_LOG(DEBUG, "protocols not match."); + return -EINVAL; + } + } + + return 0; +} + +/* check and parse */ +static int +cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size, + const struct rte_flow_action *actions, + struct cpfl_flow_mr_key_action *mr_key_action) +{ + int actions_length, i; + int j = 0; + int ret; + + actions_length = cpfl_get_actions_length(actions); + if (size > actions_length - 1) + return -EINVAL; + for (i = 0; i < size; i++) { + enum rte_flow_action_type type; + struct cpfl_flow_js_mr_key_action *key_act; + + key_act = &key_acts[i]; + /* mr->key->actions->type */ + type = key_act->type; + /* mr->key->actions->data */ + if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) { + int proto_size, k; + struct cpfl_flow_mr_key_action_vxlan_encap *encap; + + while (j < actions_length && + actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) { + j++; + } + if (j >= actions_length) + return -EINVAL; + mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP; + mr_key_action[i].encap.action = &actions[j]; + encap = &mr_key_action[i].encap; + + proto_size = key_act->encap.proto_size; + encap->proto_size = proto_size; + for (k = 0; k < proto_size; k++) { + enum rte_flow_item_type proto; + + proto = key_act->encap.protocols[k]; + encap->protocols[k] = proto; + } + ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]); + if (ret < 0) + return -EINVAL; + j++; + } else if (type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) { + while (j < actions_length && + actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) { + j++; + } + if (j >= actions_length) + return -EINVAL; + mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP; + j++; + } else { + PMD_DRV_LOG(ERR, "Not support this type: %d.", type); + return -EPERM; + } + } + + return 0; +} + +/* output: uint8_t *buffer, uint16_t *byte_len */ +static int +cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size, + struct cpfl_flow_mr_key_action *mr_key_action, + uint8_t *buffer, uint16_t *byte_len) +{ + int i; + int start = 0; + + for (i = 0; i < layout_size; i++) { + int index, size, offset; + const char *hint; + const uint8_t *addr = NULL; + struct cpfl_flow_mr_key_action *temp; + struct cpfl_flow_js_mr_layout *layout; + + layout = &layouts[i]; + /* index links to the element of the actions array. 
*/ + index = layout->index; + size = layout->size; + offset = layout->offset; + if (index == -1) { + hint = "dummpy"; + start += size; + continue; + } + hint = layout->hint; + temp = mr_key_action + index; + if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) { + const struct rte_flow_action_vxlan_encap *action_vxlan_encap; + struct rte_flow_item *definition; + int def_length, k; + + action_vxlan_encap = + (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf; + definition = action_vxlan_encap->definition; + def_length = cpfl_get_items_length(definition); + for (k = 0; k < def_length - 1; k++) { + if ((strcmp(hint, "eth") == 0 && + definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) || + (strcmp(hint, "ipv4") == 0 && + definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) || + (strcmp(hint, "udp") == 0 && + definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) || + (strcmp(hint, "tcp") == 0 && + definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) || + (strcmp(hint, "vxlan") == 0 && + definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) { + addr = (const uint8_t *)(definition[k].spec); + if (start > 255) { + *byte_len = 0; + PMD_DRV_LOG(ERR, "byte length is too long: %s", + hint); + return -EINVAL; + } + memcpy(buffer + start, addr + offset, size); + break; + } /* TODO: more hint... */ + } + if (k == def_length - 1) { + *byte_len = 0; + PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint); + return -EINVAL; + } + } else { + *byte_len = 0; + PMD_DRV_LOG(ERR, "Not support this type: %d.", temp->type); + return -EINVAL; + } /* else TODO: more type... */ + start += size; + } + *byte_len = start; + + return 0; +} + +static int +cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action, + struct cpfl_flow_mr_key_action *mr_key_action, + struct cpfl_flow_mr_action *mr_action) +{ + enum cpfl_flow_mr_action_type type; + + /* mr->action->type */ + type = action->type; + /* mr->action->data */ + if (type == CPFL_JS_MR_ACTION_TYPE_MOD) { + struct cpfl_flow_js_mr_layout *layout; + + mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD; + mr_action->mod.byte_len = 0; + mr_action->mod.prof = action->mod.prof; + layout = action->mod.layout; + if (!layout) + return 0; + memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data)); + + return cpfl_parse_layout(layout, action->mod.layout_size, mr_key_action, + mr_action->mod.data, &mr_action->mod.byte_len); + } + PMD_DRV_LOG(ERR, "Not support this type: %d.", type); + + return -EINVAL; +} + +static int +cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions, + struct cpfl_flow_mr_key_action *mr_key_action) +{ + int key_action_size; + + /* mr->key->actions */ + key_action_size = mr->key.actions_size; + return cpfl_parse_mr_key_action(mr->key.actions, key_action_size, actions, mr_key_action); +} + +/* output: struct cpfl_flow_mr_action *mr_action */ +static int +cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions, + struct cpfl_flow_mr_action *mr_action) +{ + int i; + struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0}; + + for (i = 0; i < parser->mr_size; i++) { + int ret; + struct cpfl_flow_js_mr *mr; + + mr = &parser->modifications[i]; + if (!mr) + return -EINVAL; + ret = cpfl_check_mod_key(mr, actions, mr_key_action); + if (ret < 0) + continue; + /* mr->action */ + return cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action); + } + + return -EINVAL; +} + +int +cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions, + struct 
cpfl_flow_mr_action *mr_action) +{ + /* modifications rules */ + if (!parser->modifications) { + PMD_DRV_LOG(INFO, "The modifications is optional."); + return 0; + } + + return cpfl_parse_mod_rules(parser, actions, mr_action); +} + +bool +cpfl_metadata_write_port_id(struct cpfl_itf *itf) +{ + uint16_t dev_id; + const int type = 0; + const int offset = 5; + + dev_id = cpfl_get_port_id(itf); + if (dev_id == CPFL_INVALID_HW_ID) { + PMD_DRV_LOG(ERR, "fail to get hw ID\n"); + return false; + } + cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 3); + + return true; +} + +bool +cpfl_metadata_write_targetvsi(struct cpfl_itf *itf) +{ + uint16_t dev_id; + const int type = 6; + const int offset = 2; + + dev_id = cpfl_get_vsi_id(itf); + if (dev_id == CPFL_INVALID_HW_ID) { + PMD_DRV_LOG(ERR, "fail to get hw ID"); + return false; + } + cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 1); + + return true; +} + +bool +cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf) +{ + uint16_t dev_id; + const int type = 6; + const int offset = 0; + + dev_id = cpfl_get_vsi_id(itf); + if (dev_id == CPFL_INVALID_HW_ID) { + PMD_DRV_LOG(ERR, "fail to get hw ID"); + return false; + } + cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id); + + return true; +} + +bool cpfl_metadata_write_vsi(struct cpfl_itf *itf) +{ + uint16_t dev_id; + const int type = 0; + const int offset = 24; + + dev_id = cpfl_get_vsi_id(itf); + if (dev_id == CPFL_INVALID_HW_ID) { + PMD_DRV_LOG(ERR, "fail to get hw ID"); + return false; + } + cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id); + + return true; +} diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h new file mode 100644 index 00000000000..962667adc2f --- /dev/null +++ b/drivers/net/cpfl/cpfl_flow_parser.h @@ -0,0 +1,268 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ +#include +#include + +#include "cpfl_ethdev.h" + +#ifndef _CPFL_FLOW_PARSER_H_ +#define _CPFL_FLOW_PARSER_H_ + +#define CPFL_FLOW_JSON_STR_SIZE_MAX 100 +#define CPFL_MAX_SEM_FV_KEY_SIZE 64 +#define CPFL_FLOW_JS_PROTO_SIZE 16 +#define CPFL_MOD_KEY_NUM_MAX 8 + +/* Pattern Rules Storage */ +enum cpfl_flow_pr_action_type { + CPFL_JS_PR_ACTION_TYPE_SEM, + CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1, +}; + +/* This structure matches a sequence of fields in struct rte_flow_attr */ +struct cpfl_flow_js_pr_key_attr { + uint16_t ingress; + uint16_t egress; +}; + +struct cpfl_flow_js_pr_key_proto_field { + char name[CPFL_FLOW_JSON_STR_SIZE_MAX]; + union { + char mask[CPFL_FLOW_JSON_STR_SIZE_MAX]; + uint32_t mask_32b; + }; +}; + +/* This structure matches a sequence of "struct rte_flow_item" */ +struct cpfl_flow_js_pr_key_proto { + enum rte_flow_item_type type; + struct cpfl_flow_js_pr_key_proto_field *fields; + int fields_size; +}; + +enum cpfl_flow_js_fv_type { + CPFL_FV_TYPE_PROTOCOL, + CPFL_FV_TYPE_IMMEDIATE, + CPFL_FV_TYPE_METADATA, + CPFL_FV_TYPE_UNKNOWN = -1, +}; + +struct cpfl_flow_js_fv { + uint16_t offset; + enum cpfl_flow_js_fv_type type; + union { + /* a 16 bits value */ + uint16_t immediate; + /* a reference to a protocol header with a tuple */ + struct { + enum rte_flow_item_type header; + uint16_t layer; + uint16_t offset; + uint16_t mask; + } proto; + /* a reference to a metadata */ + struct { + uint16_t type; + uint16_t offset; + uint16_t mask; + } meta; + }; +}; + +/** + * This structure defines the message be used to composite the + * profile / key of a SEM control packet + */ +struct 
cpfl_flow_js_pr_action_sem { + uint16_t prof; /* SEM profile ID */ + uint16_t subprof; /* SEM subprofile ID */ + uint16_t keysize; /* extract key size in bytes */ + struct cpfl_flow_js_fv *fv; /* A SEM field vector array */ + int fv_size; +}; + +/* define how to map current key to low level pipeline configuration */ +struct cpfl_flow_js_pr_action { + enum cpfl_flow_pr_action_type type; + union { + struct cpfl_flow_js_pr_action_sem sem; + }; +}; + +/** + * This structure defines a set of rules that direct PMD how to parse rte_flow + * protocol headers. Each rule be described by a key object and a action array. + */ +struct cpfl_flow_js_pr { + struct { + struct cpfl_flow_js_pr_key_proto *protocols; + uint16_t proto_size; + struct cpfl_flow_js_pr_key_attr *attributes; + uint16_t attr_size; + } key; + /* An array to define how to map current key to low level pipeline configuration. */ + struct cpfl_flow_js_pr_action *actions; + uint16_t actions_size; +}; + +/* Modification Rules Storage */ +/** + * The vxlan_encap action matches RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP + * and include a sequence of protocol headers defined in field protocols + * of data. + */ +struct cpfl_flow_js_mr_key_action_vxlan_encap { + enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE]; + int proto_size; +}; + +/* A set of modification rte_flow_action_xxx objects can be defined as a type / data pair. */ +struct cpfl_flow_js_mr_key_action { + enum rte_flow_action_type type; + union { + struct cpfl_flow_js_mr_key_action_vxlan_encap encap; + }; +}; + +struct cpfl_flow_js_mr_key { + struct cpfl_flow_js_mr_key_action *actions; + int actions_size; +}; + +struct cpfl_flow_js_mr_layout { + int index; /* links to the element of the actions array */ + char hint[CPFL_FLOW_JSON_STR_SIZE_MAX]; /* where the data to copy from */ + uint16_t offset; /* the start byte of the data to copy from */ + uint16_t size; /* bytes of the data to be copied to the memory region */ +}; + +/** For mod data, besides the profile ID, a layout array defines a set of hints that helps + * driver composing the MOD memory region when the action need to insert/update some packet + * data from user input. + */ +struct cpfl_flow_js_mr_action_mod { + uint16_t prof; + uint16_t byte_len; + struct cpfl_flow_js_mr_layout *layout; + int layout_size; +}; + +enum cpfl_flow_mr_action_type { + CPFL_JS_MR_ACTION_TYPE_MOD, +}; + +/** Currently, the type can only be mod. + * + * For mod data, besides the profile ID, a layout array defines a set + * of hints that helps driver composing the MOD memory region when the + * action need to insert/update some packet data from user input. + */ +struct cpfl_flow_js_mr_action { + enum cpfl_flow_mr_action_type type; + union { + struct cpfl_flow_js_mr_action_mod mod; + }; +}; + +/** + * This structure defines a set of rules that direct PMD to parse rte_flow modification + * actions. 
Each rule be described by a pair of key and action + */ +struct cpfl_flow_js_mr { + struct cpfl_flow_js_mr_key key; + struct cpfl_flow_js_mr_action action; +}; + +struct cpfl_flow_js_parser { + struct cpfl_flow_js_pr *patterns; + int pr_size; + struct cpfl_flow_js_mr *modifications; + int mr_size; +}; + +/* Pattern Rules */ +struct cpfl_flow_pr_action_sem { + uint16_t prof; + uint16_t subprof; + uint16_t keysize; + uint8_t cpfl_flow_pr_fv[CPFL_MAX_SEM_FV_KEY_SIZE]; +}; + +struct cpfl_flow_pr_action { + enum cpfl_flow_pr_action_type type; + union { + struct cpfl_flow_pr_action_sem sem; + }; +}; + +/* Modification Rules */ +struct cpfl_flow_mr_key_action_vxlan_encap { + enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE]; + uint16_t proto_size; + const struct rte_flow_action *action; +}; + +struct cpfl_flow_mr_key_action { + enum rte_flow_action_type type; + union { + struct cpfl_flow_mr_key_action_vxlan_encap encap; + }; +}; + +struct cpfl_flow_mr_action_mod { + uint16_t prof; + uint16_t byte_len; + uint8_t data[256]; +}; + +struct cpfl_flow_mr_action { + enum cpfl_flow_mr_action_type type; + union { + struct cpfl_flow_mr_action_mod mod; + }; +}; + +int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename); +int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser); +int cpfl_flow_parse_items(struct cpfl_itf *itf, + struct cpfl_flow_js_parser *parser, + const struct rte_flow_item *items, + const struct rte_flow_attr *attr, + struct cpfl_flow_pr_action *pr_action); +int cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, + const struct rte_flow_action *actions, + struct cpfl_flow_mr_action *mr_action); +bool cpfl_metadata_write_port_id(struct cpfl_itf *itf); +bool cpfl_metadata_write_vsi(struct cpfl_itf *itf); +bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf); +bool cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf); + +static inline void +cpfl_metadata_init(struct cpfl_metadata *meta) +{ + int i; + + for (i = 0; i < CPFL_META_LENGTH; i++) + meta->chunks[i].type = i; +} + +static inline void +cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset, uint16_t data) +{ + memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint16_t)); +} + +static inline void +cpfl_metadata_write32(struct cpfl_metadata *meta, int type, int offset, uint32_t data) +{ + memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint32_t)); +} + +static inline uint16_t +cpfl_metadata_read16(struct cpfl_metadata *meta, int type, int offset) +{ + return *((uint16_t *)(&meta->chunks[type].data[offset])); +} + +#endif diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c new file mode 100644 index 00000000000..ea65e205072 --- /dev/null +++ b/drivers/net/cpfl/cpfl_fxp_rule.c @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ +#include "cpfl_ethdev.h" + +#include "cpfl_fxp_rule.h" +#include "cpfl_logs.h" + +#define CTLQ_SEND_RETRIES 100 +#define CTLQ_RECEIVE_RETRIES 100 + +int +cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg, + struct idpf_ctlq_msg q_msg[]) +{ + struct idpf_ctlq_msg **msg_ptr_list; + u16 clean_count = 0; + int num_cleaned = 0; + int retries = 0; + int ret = 0; + + msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *)); + if (!msg_ptr_list) { + PMD_INIT_LOG(ERR, "no memory for cleaning ctlq"); + ret = -ENOMEM; + goto err; + } + + ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg); + if (ret) { + PMD_INIT_LOG(ERR, 
"cpfl_vport_ctlq_send() failed with error: 0x%4x", ret); + goto send_err; + } + + while (retries <= CTLQ_SEND_RETRIES) { + clean_count = num_q_msg - num_cleaned; + ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count, + &msg_ptr_list[num_cleaned]); + if (ret) { + PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret); + goto send_err; + } + + num_cleaned += clean_count; + retries++; + if (num_cleaned >= num_q_msg) + break; + rte_delay_us_sleep(10); + } + + if (retries > CTLQ_SEND_RETRIES) { + PMD_INIT_LOG(ERR, "timed out while polling for completions"); + ret = -1; + goto send_err; + } + +send_err: + if (msg_ptr_list) + free(msg_ptr_list); +err: + return ret; +} + +int +cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg, + struct idpf_ctlq_msg q_msg[]) +{ + int retries = 0; + struct idpf_dma_mem *dma; + u16 i; + uint16_t buff_cnt; + int ret = 0; + + retries = 0; + while (retries <= CTLQ_RECEIVE_RETRIES) { + rte_delay_us_sleep(10); + ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]); + + if (ret && ret != CPFL_ERR_CTLQ_NO_WORK && + ret != CPFL_ERR_CTLQ_ERROR) { + PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret); + retries++; + continue; + } + + if (ret == CPFL_ERR_CTLQ_NO_WORK) { + retries++; + continue; + } + + if (ret == CPFL_ERR_CTLQ_EMPTY) + break; + + /* TODO - process rx controlq message */ + for (i = 0; i < num_q_msg; i++) { + if (q_msg[i].data_len > 0) + dma = q_msg[i].ctx.indirect.payload; + else + dma = NULL; + + buff_cnt = dma ? 1 : 0; + ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma); + if (ret) + PMD_INIT_LOG(WARNING, "could not posted recv bufs\n"); + } + break; + } + + if (retries > CTLQ_RECEIVE_RETRIES) { + PMD_INIT_LOG(ERR, "timed out while polling for receive response"); + ret = -1; + } + + return ret; +} + +static int +cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma, + struct idpf_ctlq_msg *msg) +{ + struct cpfl_mod_rule_info *minfo = &rinfo->mod; + union cpfl_rule_cfg_pkt_record *blob = NULL; + struct cpfl_rule_cfg_data cfg = {0}; + + /* prepare rule blob */ + if (!dma->va) { + PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__); + return -1; + } + blob = (union cpfl_rule_cfg_pkt_record *)dma->va; + memset(blob, 0, sizeof(*blob)); + memset(&cfg, 0, sizeof(cfg)); + + /* fill info for both query and add/update */ + cpfl_fill_rule_mod_content(minfo->mod_obj_size, + minfo->pin_mod_content, + minfo->mod_index, + &cfg.ext.mod_content); + + /* only fill content for add/update */ + memcpy(blob->mod_blob, minfo->mod_content, + minfo->mod_content_byte_len); + +#define NO_HOST_NEEDED 0 + /* pack message */ + cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule, + rinfo->cookie, + 0, /* vsi_id not used for mod */ + rinfo->port_num, + NO_HOST_NEEDED, + 0, /* time_sel */ + 0, /* time_sel_val */ + 0, /* cache_wr_thru */ + rinfo->resp_req, + (u16)sizeof(*blob), + (void *)dma, + &cfg.common); + cpfl_prep_rule_desc(&cfg, msg); + return 0; +} + +static int +cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma, + struct idpf_ctlq_msg *msg, bool add) +{ + union cpfl_rule_cfg_pkt_record *blob = NULL; + enum cpfl_ctlq_rule_cfg_opc opc; + struct cpfl_rule_cfg_data cfg; + uint16_t cfg_ctrl; + + if (!dma->va) { + PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__); + return -1; + } + blob = (union cpfl_rule_cfg_pkt_record *)dma->va; + memset(blob, 0, sizeof(*blob)); + memset(msg, 0, sizeof(*msg)); + + if (rinfo->type == CPFL_RULE_TYPE_SEM) { + cfg_ctrl = 
CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id, + rinfo->sem.sub_prof_id, + rinfo->sem.pin_to_cache, + rinfo->sem.fixed_fetch); + cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len, + rinfo->act_bytes, rinfo->act_byte_len, + cfg_ctrl, blob); + opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule; + } else { + PMD_INIT_LOG(ERR, "not support %d rule.", rinfo->type); + return -1; + } + + cpfl_fill_rule_cfg_data_common(opc, + rinfo->cookie, + rinfo->vsi, + rinfo->port_num, + rinfo->host_id, + 0, /* time_sel */ + 0, /* time_sel_val */ + 0, /* cache_wr_thru */ + rinfo->resp_req, + sizeof(union cpfl_rule_cfg_pkt_record), + dma, + &cfg.common); + cpfl_prep_rule_desc(&cfg, msg); + return 0; +} + +static int +cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma, + struct idpf_ctlq_msg *msg, bool add) +{ + int ret = 0; + + if (rinfo->type == CPFL_RULE_TYPE_SEM) { + if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0) + ret = -1; + } else if (rinfo->type == CPFL_RULE_TYPE_MOD) { + if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0) + ret = -1; + } else { + PMD_INIT_LOG(ERR, "Invalid type of rule"); + ret = -1; + } + + return ret; +} + +int +cpfl_rule_process(struct cpfl_itf *itf, + struct idpf_ctlq_info *tx_cq, + struct idpf_ctlq_info *rx_cq, + struct cpfl_rule_info *rinfo, + int rule_num, + bool add) +{ + struct idpf_hw *hw = &itf->adapter->base.hw; + int i; + int ret = 0; + + if (rule_num == 0) + return 0; + + for (i = 0; i < rule_num; i++) { + ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add); + if (ret) { + PMD_INIT_LOG(ERR, "Could not pack rule"); + return ret; + } + } + ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to send control message"); + return ret; + } + ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to update rule"); + return ret; + } + + return 0; +} diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h new file mode 100644 index 00000000000..ed757b80b14 --- /dev/null +++ b/drivers/net/cpfl/cpfl_fxp_rule.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#ifndef _CPFL_FXP_RULE_H_ +#define _CPFL_FXP_RULE_H_ + +#include "cpfl_rules.h" + +#define CPFL_MAX_KEY_LEN 128 +#define CPFL_MAX_RULE_ACTIONS 32 + +struct cpfl_sem_rule_info { + uint16_t prof_id; + uint8_t sub_prof_id; + uint8_t key[CPFL_MAX_KEY_LEN]; + uint8_t key_byte_len; + uint8_t pin_to_cache; + uint8_t fixed_fetch; +}; + +#define CPFL_MAX_MOD_CONTENT_LEN 256 +struct cpfl_mod_rule_info { + uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN]; + uint8_t mod_content_byte_len; + uint32_t mod_index; + uint8_t pin_mod_content; + uint8_t mod_obj_size; +}; + +enum cpfl_rule_type { + CPFL_RULE_TYPE_NONE, + CPFL_RULE_TYPE_SEM, + CPFL_RULE_TYPE_MOD +}; + +struct cpfl_rule_info { + enum cpfl_rule_type type; + uint64_t cookie; + uint8_t host_id; + uint8_t port_num; + uint8_t resp_req; + /* TODO: change this to be dynamically allocated/reallocated */ + uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)]; + uint8_t act_byte_len; + /* vsi is used for lem and lpm rules */ + uint16_t vsi; + uint8_t clear_mirror_1st_state; + /* mod related fields */ + union { + struct cpfl_mod_rule_info mod; + struct cpfl_sem_rule_info sem; + }; +}; + +extern struct cpfl_vport_ext *vport; + +int cpfl_rule_process(struct cpfl_itf *itf, + struct idpf_ctlq_info *tx_cq, + struct idpf_ctlq_info *rx_cq, + struct 
cpfl_rule_info *rinfo, + int rule_num, + bool add); +int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg, + struct idpf_ctlq_msg q_msg[]); +int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg, + struct idpf_ctlq_msg q_msg[]); +#endif /*CPFL_FXP_RULE_H*/ diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c new file mode 100644 index 00000000000..de3b4267271 --- /dev/null +++ b/drivers/net/cpfl/cpfl_representor.c @@ -0,0 +1,661 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2022 Intel Corporation + */ + +#include "cpfl_representor.h" +#include "cpfl_rxtx.h" +#include "cpfl_flow.h" +#include "cpfl_rules.h" + +static int +cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter, + struct cpfl_repr_id *repr_id, + struct rte_eth_dev *dev) +{ + int ret; + + if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) < 0) + return -ENOENT; + + ret = rte_hash_add_key_data(adapter->repr_allowlist_hash, repr_id, dev); + + return ret; +} + +static int +cpfl_repr_allowlist_add(struct cpfl_adapter_ext *adapter, + struct cpfl_repr_id *repr_id) +{ + int ret; + + rte_spinlock_lock(&adapter->repr_lock); + if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) >= 0) { + ret = -EEXIST; + goto err; + } + + ret = rte_hash_add_key(adapter->repr_allowlist_hash, repr_id); + if (ret < 0) + goto err; + + rte_spinlock_unlock(&adapter->repr_lock); + return 0; +err: + rte_spinlock_unlock(&adapter->repr_lock); + return ret; +} + +static int +cpfl_repr_devargs_process_one(struct cpfl_adapter_ext *adapter, + struct rte_eth_devargs *eth_da) +{ + struct cpfl_repr_id repr_id; + int ret, c, p, v; + + for (c = 0; c < eth_da->nb_mh_controllers; c++) { + for (p = 0; p < eth_da->nb_ports; p++) { + repr_id.type = eth_da->type; + if (eth_da->type == RTE_ETH_REPRESENTOR_PF) { + repr_id.host_id = eth_da->mh_controllers[c]; + repr_id.pf_id = eth_da->ports[p]; + repr_id.vf_id = 0; + ret = cpfl_repr_allowlist_add(adapter, &repr_id); + if (ret == -EEXIST) + continue; + if (ret) { + PMD_DRV_LOG(ERR, "Failed to add PF repr to allowlist, " + "host_id = %d, pf_id = %d.", + repr_id.host_id, repr_id.pf_id); + return ret; + } + } else if (eth_da->type == RTE_ETH_REPRESENTOR_VF) { + for (v = 0; v < eth_da->nb_representor_ports; v++) { + repr_id.host_id = eth_da->mh_controllers[c]; + repr_id.pf_id = eth_da->ports[p]; + repr_id.vf_id = eth_da->representor_ports[v]; + ret = cpfl_repr_allowlist_add(adapter, &repr_id); + if (ret == -EEXIST) + continue; + if (ret) { + PMD_DRV_LOG(ERR, "Failed to add VF repr to allowlist, " + "host_id = %d, pf_id = %d, vf_id = %d.", + repr_id.host_id, + repr_id.pf_id, + repr_id.vf_id); + return ret; + } + } + } + } + } + + return 0; +} + +int +cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_devargs *devargs = &adapter->devargs; + int ret, i, j; + + /* check and refine repr args */ + for (i = 0; i < devargs->repr_args_num; i++) { + struct rte_eth_devargs *eth_da = &devargs->repr_args[i]; + + /* set default host_id to host */ + if (eth_da->nb_mh_controllers == 0) { + eth_da->nb_mh_controllers = 1; + eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST; + } else { + for (j = 0; j < eth_da->nb_mh_controllers; j++) { + if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) { + PMD_INIT_LOG(ERR, "Invalid Host ID %d", + eth_da->mh_controllers[j]); + return -EINVAL; + } + } + } + + /* set default pf to APF */ + if (eth_da->nb_ports == 0) { + eth_da->nb_ports = 1; + eth_da->ports[0] = 
CPFL_PF_TYPE_APF; + } else { + for (j = 0; j < eth_da->nb_ports; j++) { + if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) { + PMD_INIT_LOG(ERR, "Invalid Host ID %d", + eth_da->ports[j]); + return -EINVAL; + } + } + } + + ret = cpfl_repr_devargs_process_one(adapter, eth_da); + if (ret != 0) + return ret; + } + + return 0; +} + +static int +cpfl_repr_allowlist_del(struct cpfl_adapter_ext *adapter, + struct cpfl_repr_id *repr_id) +{ + int ret; + + rte_spinlock_lock(&adapter->repr_lock); + + ret = rte_hash_del_key(adapter->repr_allowlist_hash, repr_id); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to delete repr from allowlist." + "host_id = %d, type = %d, pf_id = %d, vf_id = %d", + repr_id->host_id, repr_id->type, + repr_id->pf_id, repr_id->vf_id); + goto err; + } + + rte_spinlock_unlock(&adapter->repr_lock); + return 0; +err: + rte_spinlock_unlock(&adapter->repr_lock); + return ret; +} + +static int +cpfl_repr_uninit(struct rte_eth_dev *eth_dev) +{ + struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev); + struct cpfl_adapter_ext *adapter = repr->itf.adapter; + + eth_dev->data->mac_addrs = NULL; + + cpfl_repr_allowlist_del(adapter, &repr->repr_id); + + return 0; +} + +static int +cpfl_repr_dev_configure(struct rte_eth_dev *dev) +{ + /* now only 1 RX queue is supported */ + if (dev->data->nb_rx_queues > 1) + return -EINVAL; + + return 0; +} + +static int +cpfl_repr_dev_close(struct rte_eth_dev *dev) +{ + return cpfl_repr_uninit(dev); +} + +static int +cpfl_repr_dev_info_get(struct rte_eth_dev *ethdev, + struct rte_eth_dev_info *dev_info) +{ + struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev); + + dev_info->device = ethdev->device; + dev_info->max_mac_addrs = 1; + dev_info->max_rx_queues = 1; + dev_info->max_tx_queues = 1; + dev_info->min_rx_bufsize = CPFL_MIN_BUF_SIZE; + dev_info->max_rx_pktlen = CPFL_MAX_FRAME_SIZE; + + dev_info->flow_type_rss_offloads = CPFL_RSS_OFFLOAD_ALL; + + dev_info->rx_offload_capa = + RTE_ETH_RX_OFFLOAD_VLAN_STRIP | + RTE_ETH_RX_OFFLOAD_QINQ_STRIP | + RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_RX_OFFLOAD_UDP_CKSUM | + RTE_ETH_RX_OFFLOAD_TCP_CKSUM | + RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_RX_OFFLOAD_SCATTER | + RTE_ETH_RX_OFFLOAD_VLAN_FILTER | + RTE_ETH_RX_OFFLOAD_RSS_HASH | + RTE_ETH_RX_OFFLOAD_TIMESTAMP; + + dev_info->tx_offload_capa = + RTE_ETH_TX_OFFLOAD_VLAN_INSERT | + RTE_ETH_TX_OFFLOAD_QINQ_INSERT | + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | + RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH, + .offloads = 0, + }; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = CPFL_MAX_RING_DESC, + .nb_min = CPFL_MIN_RING_DESC, + .nb_align = CPFL_ALIGN_RING_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = CPFL_MAX_RING_DESC, + .nb_min = CPFL_MIN_RING_DESC, + .nb_align = CPFL_ALIGN_RING_DESC, + }; + + dev_info->switch_info.name = ethdev->device->name; + dev_info->switch_info.domain_id = 0; /* the same domain*/ + dev_info->switch_info.port_id = repr->vport_info->vport.info.vsi_id; + + return 0; +} + +static int +cpfl_repr_dev_start(struct rte_eth_dev *dev) +{ + uint16_t i; + + for (i = 0; i < 
dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static int +cpfl_repr_dev_stop(struct rte_eth_dev *dev) +{ + uint16_t i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + + dev->data->dev_started = 0; + return 0; +} + +static int +cpfl_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t queue_id, + __rte_unused uint16_t nb_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *conf, + __rte_unused struct rte_mempool *pool) +{ + /* Dummy */ + return 0; +} + +static int +cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t queue_id, + __rte_unused uint16_t nb_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_txconf *conf) +{ + /* Dummy */ + return 0; +} + +static int +cpfl_func_id_get(uint8_t host_id, uint8_t pf_id) +{ + if ((host_id != CPFL_HOST_ID_HOST && + host_id != CPFL_HOST_ID_ACC) || + (pf_id != CPFL_PF_TYPE_APF && + pf_id != CPFL_PF_TYPE_CPF)) + return -EINVAL; + + static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = { + [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF, + [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID, + [CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID, + [CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID, + }; + + return func_id_map[host_id][pf_id]; +} + +static int +cpfl_repr_link_update(struct rte_eth_dev *ethdev, + int wait_to_complete) +{ + struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev); + struct rte_eth_link *dev_link = ðdev->data->dev_link; + struct cpfl_adapter_ext *adapter = repr->itf.adapter; + struct cpchnl2_get_vport_info_response response; + struct cpfl_vport_id vi; + int ret; + + if (!(ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) { + PMD_INIT_LOG(ERR, "This ethdev is not representor."); + return -EINVAL; + } + + if (wait_to_complete) { + if (repr->repr_id.type == RTE_ETH_REPRESENTOR_PF) { + /* PF */ + vi.func_type = CPCHNL2_FUNC_TYPE_PF; + vi.pf_id = cpfl_func_id_get(repr->repr_id.host_id, repr->repr_id.pf_id); + vi.vf_id = 0; + } else { + /* VF */ + vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV; + vi.pf_id = CPFL_HOST0_APF; + vi.vf_id = repr->repr_id.vf_id; + } + ret = cpfl_cc_vport_info_get(adapter, &repr->vport_info->vport.vport, + &vi, &response); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Fail to get vport info."); + return ret; + } + + if (response.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED) + repr->func_up = true; + else + repr->func_up = false; + } + + dev_link->link_status = repr->func_up ? 
+ RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; + + return 0; +} + +static int +cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev, + const struct rte_flow_ops **ops) +{ + if (!dev) + return -EINVAL; + +#ifdef RTE_HAS_JANSSON + *ops = &cpfl_flow_ops; +#else + *ops = NULL; + PMD_DRV_LOG(NOTICE, "not support rte_flow, please install json-c library."); +#endif + return 0; +} + +static const struct eth_dev_ops cpfl_repr_dev_ops = { + .dev_start = cpfl_repr_dev_start, + .dev_stop = cpfl_repr_dev_stop, + .dev_configure = cpfl_repr_dev_configure, + .dev_close = cpfl_repr_dev_close, + .dev_infos_get = cpfl_repr_dev_info_get, + + .rx_queue_setup = cpfl_repr_rx_queue_setup, + .tx_queue_setup = cpfl_repr_tx_queue_setup, + + .link_update = cpfl_repr_link_update, + .flow_ops_get = cpfl_dev_repr_flow_ops_get, +}; + +static int +cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param) +{ + struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev); + struct cpfl_repr_param *param = init_param; + struct cpfl_adapter_ext *adapter = param->adapter; + int ret; + + repr->repr_id = param->repr_id; + repr->vport_info = param->vport_info; + repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR; + repr->itf.adapter = adapter; + repr->itf.data = eth_dev->data; + if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED) + repr->func_up = true; + + TAILQ_INIT(&repr->itf.flow_list); + memset(repr->itf.dma, 0, sizeof(repr->itf.dma)); + memset(repr->itf.msg, 0, sizeof(repr->itf.msg)); + ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma, + sizeof(union cpfl_rule_cfg_pkt_record), + CPFL_FLOW_BATCH_SIZE); + if (ret < 0) + return ret; + + eth_dev->dev_ops = &cpfl_repr_dev_ops; + + eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; + + eth_dev->data->representor_id = + CPFL_REPRESENTOR_ID(repr->repr_id.type, + repr->repr_id.host_id, + repr->repr_id.pf_id, + repr->repr_id.vf_id); + + eth_dev->data->mac_addrs = &repr->mac_addr; + + rte_eth_random_addr(repr->mac_addr.addr_bytes); + + return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev); +} + +static bool +cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id, + struct cpchnl2_vport_info *info) +{ + int func_id; + + if (repr_id->type == RTE_ETH_REPRESENTOR_PF && + info->func_type == CPFL_VPORT_LAN_PF) { + func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id); + if (func_id < 0 || func_id != info->pf_id) + return false; + else + return true; + } else if (repr_id->type == RTE_ETH_REPRESENTOR_VF && + info->func_type == CPFL_VPORT_LAN_VF) { + if (repr_id->vf_id == info->vf_id) + return true; + } + + return false; +} + +static int +cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter, + const struct cpfl_repr_id *repr_id, + struct cpchnl2_get_vport_list_response *response) +{ + struct cpfl_vport_id vi; + int ret; + + if (repr_id->type == RTE_ETH_REPRESENTOR_PF) { + /* PF */ + vi.func_type = CPCHNL2_FUNC_TYPE_PF; + vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id); + vi.vf_id = 0; + } else { + /* VF */ + vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV; + vi.pf_id = CPFL_HOST0_APF; + vi.vf_id = repr_id->vf_id; + } + + ret = cpfl_cc_vport_list_get(adapter, &vi, response); + + return ret; +} + +static int +cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter, + const struct cpfl_repr_id *repr_id, + struct cpchnl2_vport_id *vport_id, + struct cpchnl2_get_vport_info_response *response) +{ + struct cpfl_vport_id vi; + int ret; + + if (repr_id->type == RTE_ETH_REPRESENTOR_PF) { + /* PF */ + vi.func_type = CPCHNL2_FUNC_TYPE_PF; + 
vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id); + vi.vf_id = 0; + } else { + /* VF */ + vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV; + vi.pf_id = CPFL_HOST0_APF; + vi.vf_id = repr_id->vf_id; + } + + ret = cpfl_cc_vport_info_get(adapter, vport_id, &vi, response); + + return ret; +} + +static int +cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter, + const struct cpfl_repr_id *repr_id, uint32_t vport_id, + struct cpchnl2_get_vport_info_response *response) +{ + struct cpfl_vport_id vi; + int ret; + + vi.vport_id = vport_id; + if (repr_id->type == RTE_ETH_REPRESENTOR_PF) { + /* PF */ + vi.func_type = CPCHNL2_FUNC_TYPE_PF; + vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id); + } else { + /* VF */ + vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV; + vi.pf_id = CPFL_HOST0_APF; + vi.vf_id = repr_id->vf_id; + } + + ret = cpfl_vport_info_create(adapter, &vi, (struct cpchnl2_event_vport_created *)response); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Fail to update vport map hash for representor."); + return ret; + } + + return 0; +} + +int +cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) +{ + struct rte_eth_dev *dev; + uint32_t iter = 0; + const struct cpfl_repr_id *repr_id; + const struct cpfl_vport_id *vp_id; + struct cpchnl2_get_vport_list_response *vlist_resp; + struct cpchnl2_get_vport_info_response vinfo_resp; + int ret; + + vlist_resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0); + if (vlist_resp == NULL) + return -ENOMEM; + + rte_spinlock_lock(&adapter->repr_lock); + + while (rte_hash_iterate(adapter->repr_allowlist_hash, + (const void **)&repr_id, (void **)&dev, &iter) >= 0) { + struct cpfl_vport_info *vi; + char name[RTE_ETH_NAME_MAX_LEN]; + uint32_t iter_iter = 0; + int i; + + /* skip representor already be created */ + if (dev != NULL) + continue; + + if (repr_id->type == RTE_ETH_REPRESENTOR_VF) + snprintf(name, sizeof(name), "net_%s_representor_c%dpf%dvf%d", + pci_dev->name, + repr_id->host_id, + repr_id->pf_id, + repr_id->vf_id); + else + snprintf(name, sizeof(name), "net_%s_representor_c%dpf%d", + pci_dev->name, + repr_id->host_id, + repr_id->pf_id); + + /* get vport list for the port representor */ + ret = cpfl_repr_vport_list_query(adapter, repr_id, vlist_resp); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d's vport list", + repr_id->host_id, repr_id->pf_id, repr_id->vf_id); + goto err; + } + + if (vlist_resp->nof_vports == 0) { + PMD_INIT_LOG(WARNING, "No matched vport for representor %s", name); + continue; + } + + /* get all vport info for the port representor */ + for (i = 0; i < vlist_resp->nof_vports; i++) { + ret = cpfl_repr_vport_info_query(adapter, repr_id, + &vlist_resp->vports[i], &vinfo_resp); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d vport[%d]'s info", + repr_id->host_id, repr_id->pf_id, repr_id->vf_id, + vlist_resp->vports[i].vport_id); + goto err; + } + + ret = cpfl_repr_vport_map_update(adapter, repr_id, + vlist_resp->vports[i].vport_id, &vinfo_resp); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to update host%d pf%d vf%d vport[%d]'s info to vport_map_hash", + repr_id->host_id, repr_id->pf_id, repr_id->vf_id, + vlist_resp->vports[i].vport_id); + goto err; + } + } + + /* find the matched vport */ + rte_spinlock_lock(&adapter->vport_map_lock); + + while (rte_hash_iterate(adapter->vport_map_hash, + (const void **)&vp_id, (void **)&vi, &iter_iter) >= 0) { + struct cpfl_repr_param param; + + if (!cpfl_match_repr_with_vport(repr_id, &vi->vport.info)) + continue; + + 
param.adapter = adapter; + param.repr_id = *repr_id; + param.vport_info = vi; + + ret = rte_eth_dev_create(&pci_dev->device, + name, + sizeof(struct cpfl_repr), + NULL, NULL, cpfl_repr_init, + ¶m); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to create representor %s", name); + rte_spinlock_unlock(&adapter->vport_map_lock); + goto err; + } + break; + } + + rte_spinlock_unlock(&adapter->vport_map_lock); + } + +err: + rte_spinlock_unlock(&adapter->repr_lock); + rte_free(vlist_resp); + return ret; +} diff --git a/drivers/net/cpfl/cpfl_representor.h b/drivers/net/cpfl/cpfl_representor.h new file mode 100644 index 00000000000..d3a4de531ec --- /dev/null +++ b/drivers/net/cpfl/cpfl_representor.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#ifndef _CPFL_REPRESENTOR_H_ +#define _CPFL_REPRESENTOR_H_ + +#include +#include + +struct cpfl_repr_id { + uint8_t host_id; + uint8_t pf_id; + uint8_t type; + uint8_t vf_id; +}; + +struct cpfl_repr_param { + struct cpfl_adapter_ext *adapter; + struct cpfl_repr_id repr_id; + struct cpfl_vport_info *vport_info; +}; + +int cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter); +int cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter); +#endif diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c new file mode 100644 index 00000000000..3d259d3da82 --- /dev/null +++ b/drivers/net/cpfl/cpfl_rules.c @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2023 Intel Corporation + */ + +#include +#include +#include "cpfl_rules.h" + + /** + * cpfl_prep_rule_desc_common_ctx - get bit common context for descriptor + */ +static inline uint64_t +cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg) +{ + uint64_t context = 0; + + switch (cmn_cfg->opc) { + case cpfl_ctlq_mod_query_rule: + case cpfl_ctlq_mod_add_update_rule: + /* fallthrough */ + case cpfl_ctlq_sem_query_rule_hash_addr: + case cpfl_ctlq_sem_query_del_rule_hash_addr: + case cpfl_ctlq_sem_add_rule: + case cpfl_ctlq_sem_del_rule: + case cpfl_ctlq_sem_query_rule: + case cpfl_ctlq_sem_update_rule: + context |= SHIFT_VAL64(cmn_cfg->time_sel, + MEV_RULE_TIME_SEL); + context |= SHIFT_VAL64(cmn_cfg->time_sel_val, + MEV_RULE_TIME_SEL_VAL); + context |= SHIFT_VAL64(cmn_cfg->host_id, + MEV_RULE_HOST_ID); + context |= SHIFT_VAL64(cmn_cfg->port_num, + MEV_RULE_PORT_NUM); + context |= SHIFT_VAL64(cmn_cfg->resp_req, + MEV_RULE_RESP_REQ); + context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru, + MEV_RULE_CACHE_WR_THRU); + break; + default: + break; + } + + return context; +} + +/** + * cpfl_prep_rule_desc_ctx - get bit context for descriptor + */ +static inline uint64_t +cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data) +{ + uint64_t context = 0; + + context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common); + + switch (cfg_data->common.opc) { + case cpfl_ctlq_mod_query_rule: + case cpfl_ctlq_mod_add_update_rule: + context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size, + MEV_RULE_MOD_OBJ_SIZE); + context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content, + MEV_RULE_PIN_MOD_CONTENT); + context |= SHIFT_VAL64(cfg_data->ext.mod_content.index, + MEV_RULE_MOD_INDEX); + break; + case cpfl_ctlq_sem_query_rule_hash_addr: + case cpfl_ctlq_sem_query_del_rule_hash_addr: + context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id, + MEV_RULE_OBJ_ID); + context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr, + MEV_RULE_OBJ_ADDR); + break; + default: + 
break; + } + + return context; +} + +/** + * cpfl_prep_rule_desc - build descriptor data from rule config data + * + * note: call this function before sending rule to HW via fast path + */ +void +cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data, + struct idpf_ctlq_msg *ctlq_msg) +{ + uint64_t context; + uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0]; + + context = cpfl_prep_rule_desc_ctx(cfg_data); + *ctlq_ctx = CPU_TO_LE64(context); + memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t)); + ctlq_msg->opcode = (uint16_t)cfg_data->common.opc; + ctlq_msg->data_len = cfg_data->common.buf_len; + ctlq_msg->status = 0; + ctlq_msg->ctx.indirect.payload = cfg_data->common.payload; +} + +/** + * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info + * note: call this function before sending rule to HW via fast path + */ +void +cpfl_prep_sem_rule_blob(const uint8_t *key, + uint8_t key_byte_len, + const uint8_t *act_bytes, + uint8_t act_byte_len, + uint16_t cfg_ctrl, + union cpfl_rule_cfg_pkt_record *rule_blob) +{ + uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions; + const uint32_t *act_src = (const uint32_t *)act_bytes; + uint32_t i; + + idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM); + idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len, + CPFL_NONDMA_TO_DMA); + + for (i = 0; i < act_byte_len / sizeof(uint32_t); i++) + *act_dst++ = CPU_TO_LE32(*act_src++); + + rule_blob->sem_rule.cfg_ctrl[0] = cfg_ctrl & 0xFF; + rule_blob->sem_rule.cfg_ctrl[1] = (cfg_ctrl >> 8) & 0xFF; +} diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h new file mode 100644 index 00000000000..d23eae8e917 --- /dev/null +++ b/drivers/net/cpfl/cpfl_rules.h @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2023 Intel Corporation + */ + +#ifndef _CPFL_RULES_API_H_ +#define _CPFL_RULES_API_H_ + +#include +#include "cpfl_actions.h" +#include "cpfl_controlq.h" + +/* Common Bit Mask Macros */ +#define CPFL_BIT(b) (1 << (b)) + +#define MAKE_MASK(type, mask, shift) ((u##type) (mask) << (shift)) +#define SHIFT_VAL_LT(type, val, field) \ + (((u##type)(val) << field##_S) & field##_M) +#define SHIFT_VAL_RT(type, val, field) \ + (((u##type)(val) & field##_M) >> field##_S) + +#define MAKE_MASK_VAL(type, bit_len) (((u##type)0x01 << (bit_len)) - 1) +#define MAKE_MASK_VAL16(bit_len) MAKE_MASK_VAL(16, bit_len) +#define MAKE_MASK_VAL64(bit_len) MAKE_MASK_VAL(64, bit_len) + +#define MAKE_MASK64(mask, shift) MAKE_MASK(64, mask, shift) +#define MAKE_MASK16(mask, shift) MAKE_MASK(16, mask, shift) +#define MAKE_MASK32(mask, shift) MAKE_MASK(32, mask, shift) + +/* Make masks with bit length and left-shifting count */ +#define MAKE_SMASK(type, bits, shift) \ + ((((u##type)1 << (bits)) - 1) << (shift)) +#define MAKE_SMASK64(bits, shift) MAKE_SMASK(64, bits, shift) +#define MAKE_SMASK32(bits, shift) MAKE_SMASK(32, bits, shift) +#define MAKE_SMASK16(bits, shift) MAKE_SMASK(16, bits, shift) + +#define SHIFT_VAL64(val, field) SHIFT_VAL_LT(64, val, field) +#define SHIFT_VAL32(val, field) SHIFT_VAL_LT(32, val, field) +#define SHIFT_VAL16(val, field) SHIFT_VAL_LT(16, val, field) + +/* Rule Config queue opcodes */ +enum cpfl_ctlq_rule_cfg_opc { + cpfl_ctlq_sem_add_rule = 0x1303, + cpfl_ctlq_sem_update_rule = 0x1304, + cpfl_ctlq_sem_del_rule = 0x1305, + cpfl_ctlq_sem_query_rule = 0x1306, + cpfl_ctlq_sem_query_rule_hash_addr = 0x1307, + cpfl_ctlq_sem_query_del_rule_hash_addr = 0x1308, + + cpfl_ctlq_mod_add_update_rule = 
0x1360, + cpfl_ctlq_mod_query_rule = 0x1361, +}; + +enum cpfl_cfg_pkt_error_code { + CPFL_CFG_PKT_ERR_OK = 0, + CPFL_CFG_PKT_ERR_ESRCH = 1, /* Bad opcode */ + CPFL_CFG_PKT_ERR_EEXIST = 2, /* Entry Already exists */ + CPFL_CFG_PKT_ERR_ENOSPC = 4, /* No space left in the table*/ + CPFL_CFG_PKT_ERR_ERANGE = 5, /* Parameter out of range */ + CPFL_CFG_PKT_ERR_ESBCOMP = 6, /* Completion Error */ + CPFL_CFG_PKT_ERR_ENOPIN = 7, /* Entry cannot be pinned in cache */ + CPFL_CFG_PKT_ERR_ENOTFND = 8, /* Entry Not exists */ + CPFL_CFG_PKT_ERR_EMAXCOL = 9 /* Max Hash Collision */ +}; + +/* macros for creating context for rule descriptor */ +#define MEV_RULE_VSI_ID_S 0 +#define MEV_RULE_VSI_ID_M \ + MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S) + +#define MEV_RULE_TIME_SEL_S 13 +#define MEV_RULE_TIME_SEL_M \ + MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S) + +#define MEV_RULE_TIME_SEL_VAL_S 15 +#define MEV_RULE_TIME_SEL_VAL_M \ + MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S) + +#define MEV_RULE_PORT_NUM_S 16 +#define MEV_RULE_HOST_ID_S 18 +#define MEV_RULE_PORT_NUM_M \ + MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S) +#define MEV_RULE_HOST_ID_M \ + MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S) + +#define MEV_RULE_CACHE_WR_THRU_S 21 +#define MEV_RULE_CACHE_WR_THRU_M \ + MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S) + +#define MEV_RULE_RESP_REQ_S 22 +#define MEV_RULE_RESP_REQ_M \ + MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S) +#define MEV_RULE_OBJ_ADDR_S 24 +#define MEV_RULE_OBJ_ADDR_M \ + MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S) +#define MEV_RULE_OBJ_ID_S 59 +#define MEV_RULE_OBJ_ID_M \ + MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S) + +/* macros for creating CFG_CTRL for sem/lem rule blob */ +#define MEV_RULE_CFG_CTRL_PROF_ID_S 0 +#define MEV_RULE_CFG_CTRL_PROF_ID_M \ + MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S) + +#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S 11 +#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M \ + MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S) +#define MEV_RULE_CFG_CTRL_PIN_CACHE_S 13 +#define MEV_RULE_CFG_CTRL_PIN_CACHE_M \ + MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S) +#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S 14 +#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M \ + MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S) +#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S 15 +#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M \ + MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S) + +/** + * macro to build the CFG_CTRL for rule packet data, which is one of + * cpfl_prep_sem_rule_blob()'s input parameter. 
+ */ + /* build SEM CFG_CTRL*/ +#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id, \ + pin_to_cache, fixed_fetch) \ + (SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID) | \ + SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID) | \ + SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE) | \ + SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH)) + +/* build LEM CFG_CTRL*/ +#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror) \ + (SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID) | \ + SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE) | \ + SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR)) + +/* macros for creating mod content config packets */ +#define MEV_RULE_MOD_INDEX_S 24 +#define MEV_RULE_MOD_INDEX_M \ + MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S) + +#define MEV_RULE_PIN_MOD_CONTENT_S 62 +#define MEV_RULE_PIN_MOD_CONTENT_M \ + MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S) +#define MEV_RULE_MOD_OBJ_SIZE_S 63 +#define MEV_RULE_MOD_OBJ_SIZE_M \ + MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S) + +/** + * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM + * note: The key may be in mixed big/little endian format, the rest of members + * are in little endian + */ +struct cpfl_sem_rule_cfg_pkt { +#define MEV_SEM_RULE_KEY_SIZE 128 + uint8_t key[MEV_SEM_RULE_KEY_SIZE]; + +#define MEV_SEM_RULE_ACT_SIZE 72 + uint8_t actions[MEV_SEM_RULE_ACT_SIZE]; + + /* Bit(s): + * 10:0 : PROFILE_ID + * 12:11: SUB_PROF_ID (used for SEM only) + * 13 : pin the SEM key content into the cache + * 14 : Reserved + * 15 : Fixed_fetch + */ + uint8_t cfg_ctrl[2]; + + /* Bit(s): + * 0: valid + * 15:1: Hints + * 26:16: PROFILE_ID, the profile associated with the entry + * 31:27: PF + * 55:32: FLOW ID (assigned by HW) + * 63:56: EPOCH + */ + uint8_t ctrl_word[8]; + uint8_t padding[46]; +}; + +/** + * union cpfl_rule_cfg_pkt_record - Describes rule data blob + */ +union cpfl_rule_cfg_pkt_record { + struct cpfl_sem_rule_cfg_pkt sem_rule; + uint8_t pkt_data[256]; + uint8_t mod_blob[256]; +}; + +/** + * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure + */ +struct cpfl_rule_query_addr { + uint8_t obj_id; + uint32_t obj_addr; +}; + +/** + * cpfl_rule_query_del_addr - Rule Query and Delete Address + */ +struct cpfl_rule_query_del_addr { + uint8_t obj_id; + uint32_t obj_addr; +}; + +/** + * cpfl_rule_mod_content - MOD Rule Content + */ +struct cpfl_rule_mod_content { + uint8_t obj_size; + uint8_t pin_content; + uint32_t index; +}; + +/** + * cpfl_rule_cfg_data_common - data struct for all rule opcodes + *note: some rules may only require part of structure + */ +struct cpfl_rule_cfg_data_common { + enum cpfl_ctlq_rule_cfg_opc opc; + uint64_t cookie; + uint16_t vsi_id; + uint8_t port_num; + uint8_t host_id; + uint8_t time_sel; + uint8_t time_sel_val; + uint8_t cache_wr_thru; + uint8_t resp_req; + uint32_t ret_val; + uint16_t buf_len; + struct idpf_dma_mem *payload; +}; + +/** + * cpfl_rule_cfg_data - rule config data + * note: Before sending rule to HW, caller needs to fill + * in this struct then call cpfl_prep_rule_desc(). 
+ */ +struct cpfl_rule_cfg_data { + struct cpfl_rule_cfg_data_common common; + union { + struct cpfl_rule_query_addr query_addr; + struct cpfl_rule_query_del_addr query_del_addr; + struct cpfl_rule_mod_content mod_content; + } ext; +}; + +/** + * cpfl_fill_rule_mod_content - fill info for mod content + */ +static inline void +cpfl_fill_rule_mod_content(uint8_t mod_obj_size, + uint8_t pin_mod_content, + uint32_t mod_index, + struct cpfl_rule_mod_content *mod_content) +{ + mod_content->obj_size = mod_obj_size; + mod_content->pin_content = pin_mod_content; + mod_content->index = mod_index; +} + +/** + * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes + * note: call this function before calls cpfl_prep_rule_desc() + */ +static inline void +cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc, + uint64_t cookie, + uint16_t vsi_id, + uint8_t port_num, + uint8_t host_id, + uint8_t time_sel, + uint8_t time_sel_val, + uint8_t cache_wr_thru, + uint8_t resp_req, + uint16_t payload_len, + struct idpf_dma_mem *payload, + struct cpfl_rule_cfg_data_common *cfg_cmn) +{ + cfg_cmn->opc = opc; + cfg_cmn->cookie = cookie; + cfg_cmn->vsi_id = vsi_id; + cfg_cmn->port_num = port_num; + cfg_cmn->resp_req = resp_req; + cfg_cmn->ret_val = 0; + cfg_cmn->host_id = host_id; + cfg_cmn->time_sel = time_sel; + cfg_cmn->time_sel_val = time_sel_val; + cfg_cmn->cache_wr_thru = cache_wr_thru; + + cfg_cmn->buf_len = payload_len; + cfg_cmn->payload = payload; +} + +void +cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data, + struct idpf_ctlq_msg *ctlq_msg); + +void +cpfl_prep_sem_rule_blob(const uint8_t *key, + uint8_t key_byte_len, + const uint8_t *act_bytes, + uint8_t act_byte_len, + uint16_t cfg_ctrl, + union cpfl_rule_cfg_pkt_record *rule_blob); + +#endif /* _CPFL_RULES_API_H_ */ diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c index 2ef6871a850..ab8bec46455 100644 --- a/drivers/net/cpfl/cpfl_rxtx.c +++ b/drivers/net/cpfl/cpfl_rxtx.c @@ -135,7 +135,7 @@ cpfl_dma_zone_reserve(struct rte_eth_dev *dev, uint16_t queue_idx, ring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_sched_desc), CPFL_DMA_MEM_ALIGN); else - ring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_desc), + ring_size = RTE_ALIGN(len * sizeof(struct idpf_base_tx_desc), CPFL_DMA_MEM_ALIGN); memcpy(ring_name, "cpfl Tx ring", sizeof("cpfl Tx ring")); break; diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h index d8e9191196c..479e1ddcb9b 100644 --- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h +++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h @@ -25,7 +25,11 @@ RTE_ETH_RX_OFFLOAD_TIMESTAMP) #define CPFL_TX_NO_VECTOR_FLAGS ( \ RTE_ETH_TX_OFFLOAD_TCP_TSO | \ - RTE_ETH_TX_OFFLOAD_MULTI_SEGS) + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \ + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \ + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \ + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \ + RTE_ETH_TX_OFFLOAD_TCP_CKSUM) static inline int cpfl_rx_vec_queue_default(struct idpf_rx_queue *rxq) @@ -81,6 +85,9 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev) struct cpfl_rx_queue *cpfl_rxq; int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH; + if (dev->data->scattered_rx) + return CPFL_SCALAR_PATH; + for (i = 0; i < dev->data->nb_rx_queues; i++) { cpfl_rxq = dev->data->rx_queues[i]; default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base); diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c new file mode 100644 index 00000000000..7d277a0e8e8 --- /dev/null +++ b/drivers/net/cpfl/cpfl_vchnl.c 
@@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#include "cpfl_ethdev.h" +#include + +int +cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter, + struct cpfl_vport_id *vi, + struct cpchnl2_get_vport_list_response *response) +{ + struct cpchnl2_get_vport_list_request request; + struct idpf_cmd_info args; + int err; + + memset(&request, 0, sizeof(request)); + request.func_type = vi->func_type; + request.pf_id = vi->pf_id; + request.vf_id = vi->vf_id; + + memset(&args, 0, sizeof(args)); + args.ops = CPCHNL2_OP_GET_VPORT_LIST; + args.in_args = (uint8_t *)&request; + args.in_args_size = sizeof(struct cpchnl2_get_vport_list_request); + args.out_buffer = adapter->base.mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + + err = idpf_vc_cmd_execute(&adapter->base, &args); + if (err != 0) { + PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_LIST"); + return err; + } + + rte_memcpy(response, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE); + + return 0; +} + +int +cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter, + struct cpchnl2_vport_id *vport_id, + struct cpfl_vport_id *vi, + struct cpchnl2_get_vport_info_response *response) +{ + struct cpchnl2_get_vport_info_request request; + struct idpf_cmd_info args; + int err; + + request.vport.vport_id = vport_id->vport_id; + request.vport.vport_type = vport_id->vport_type; + request.func.func_type = vi->func_type; + request.func.pf_id = vi->pf_id; + request.func.vf_id = vi->vf_id; + + memset(&args, 0, sizeof(args)); + args.ops = CPCHNL2_OP_GET_VPORT_INFO; + args.in_args = (uint8_t *)&request; + args.in_args_size = sizeof(struct cpchnl2_get_vport_info_request); + args.out_buffer = adapter->base.mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + + err = idpf_vc_cmd_execute(&adapter->base, &args); + if (err != 0) { + PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_INFO"); + return err; + } + + rte_memcpy(response, args.out_buffer, sizeof(*response)); + + return 0; +} + +int +cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter) +{ + struct virtchnl2_create_vport vport_msg; + struct idpf_cmd_info args; + int err = -1; + + memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport)); + vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT); + vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE); + vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE); + vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM; + vport_msg.num_tx_complq = 0; + vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM; + vport_msg.num_rx_bufq = 0; + + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL2_OP_CREATE_VPORT; + args.in_args = (uint8_t *)&vport_msg; + args.in_args_size = sizeof(vport_msg); + args.out_buffer = adapter->base.mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + + err = idpf_vc_cmd_execute(&adapter->base, &args); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT"); + return err; + } + + memcpy(adapter->ctrl_vport_recv_info, args.out_buffer, + IDPF_DFLT_MBX_BUF_SIZE); + return err; +} + +int +cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_vport *vport = &adapter->ctrl_vport; + struct virtchnl2_config_rx_queues *vc_rxqs = NULL; + struct virtchnl2_rxq_info *rxq_info; + struct idpf_cmd_info args; + uint16_t num_qs; + int size, err, i; + + if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) { + PMD_DRV_LOG(ERR, "This rxq model isn't supported."); + err = -EINVAL; + return 
err; + } + + num_qs = CPFL_RX_CFGQ_NUM; + size = sizeof(*vc_rxqs) + (num_qs - 1) * + sizeof(struct virtchnl2_rxq_info); + vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0); + if (!vc_rxqs) { + PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues"); + err = -ENOMEM; + return err; + } + vc_rxqs->vport_id = vport->base.vport_id; + vc_rxqs->num_qinfo = num_qs; + + for (i = 0; i < num_qs; i++) { + rxq_info = &vc_rxqs->qinfo[i]; + rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa; + rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX; + rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id; + rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE; + rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size; + rxq_info->max_pkt_size = vport->base.max_pkt_len; + rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M; + rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE; + rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len; + } + + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES; + args.in_args = (uint8_t *)vc_rxqs; + args.in_args_size = size; + args.out_buffer = adapter->base.mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + + err = idpf_vc_cmd_execute(&adapter->base, &args); + rte_free(vc_rxqs); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES"); + + return err; +} + +int +cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_vport *vport = &adapter->ctrl_vport; + struct virtchnl2_config_tx_queues *vc_txqs = NULL; + struct virtchnl2_txq_info *txq_info; + struct idpf_cmd_info args; + uint16_t num_qs; + int size, err, i; + + if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) { + PMD_DRV_LOG(ERR, "This txq model isn't supported."); + err = -EINVAL; + return err; + } + + num_qs = CPFL_TX_CFGQ_NUM; + size = sizeof(*vc_txqs) + (num_qs - 1) * + sizeof(struct virtchnl2_txq_info); + vc_txqs = rte_zmalloc("cfg_txqs", size, 0); + if (!vc_txqs) { + PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues"); + err = -ENOMEM; + return err; + } + vc_txqs->vport_id = vport->base.vport_id; + vc_txqs->num_qinfo = num_qs; + + for (i = 0; i < num_qs; i++) { + txq_info = &vc_txqs->qinfo[i]; + txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa; + txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX; + txq_info->queue_id = adapter->cfgq_info[2 * i].id; + txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE; + txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE; + txq_info->ring_len = adapter->cfgq_info[2 * i].len; + } + + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES; + args.in_args = (uint8_t *)vc_txqs; + args.in_args_size = size; + args.out_buffer = adapter->base.mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + + err = idpf_vc_cmd_execute(&adapter->base, &args); + rte_free(vc_txqs); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES"); + + return err; +} diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build index 8d62ebfd77c..e4e0e269bd3 100644 --- a/drivers/net/cpfl/meson.build +++ b/drivers/net/cpfl/meson.build @@ -11,11 +11,15 @@ if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 0 subdir_done() endif -deps += ['common_idpf'] +deps += ['hash', 'common_idpf'] sources = files( 'cpfl_ethdev.c', 'cpfl_rxtx.c', + 'cpfl_vchnl.c', + 'cpfl_representor.c', + 'cpfl_controlq.c', + 'cpfl_rules.c', ) if arch_subdir == 'x86' @@ -36,3 +40,13 @@ if arch_subdir == 'x86' cflags += ['-DCC_AVX512_SUPPORT'] endif endif + +if 
dpdk_conf.has('RTE_HAS_JANSSON') + sources += files( + 'cpfl_flow.c', + 'cpfl_flow_engine_fxp.c', + 'cpfl_flow_parser.c', + 'cpfl_fxp_rule.c', + ) + ext_deps += jansson_dep +endif diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h index 8f2ffa0eeb1..7bee5cf3a85 100644 --- a/drivers/net/cxgbe/base/adapter.h +++ b/drivers/net/cxgbe/base/adapter.h @@ -511,13 +511,7 @@ static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr, CXGBE_WRITE_REG64(adapter, reg_addr, val); } -#define PCI_STATUS 0x06 /* 16 bits */ -#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */ -#define PCI_CAPABILITY_LIST 0x34 -/* Offset of first capability list entry */ -#define PCI_CAP_ID_EXP 0x10 /* PCI Express */ -#define PCI_CAP_LIST_ID 0 /* Capability ID */ -#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */ +#define PCI_CAP_ID_EXP RTE_PCI_CAP_ID_EXP #define PCI_EXP_DEVCTL 0x0008 /* Device control */ #define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ #define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */ @@ -620,31 +614,12 @@ static inline void t4_os_pci_read_cfg(struct adapter *adapter, size_t addr, */ static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap) { - u16 status; - int ttl = 48; - u8 pos = 0; - u8 id = 0; - - t4_os_pci_read_cfg2(adapter, PCI_STATUS, &status); - if (!(status & PCI_STATUS_CAP_LIST)) { + if (!rte_pci_has_capability_list(adapter->pdev)) { dev_err(adapter, "PCIe capability reading failed\n"); return -1; } - t4_os_pci_read_cfg(adapter, PCI_CAPABILITY_LIST, &pos); - while (ttl-- && pos >= 0x40) { - pos &= ~3; - t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_ID), &id); - - if (id == 0xff) - break; - - if (id == cap) - return (int)pos; - - t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_NEXT), &pos); - } - return 0; + return rte_pci_find_capability(adapter->pdev, cap); } /** diff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h index 8d3737fc610..0b02eb62a9e 100644 --- a/drivers/net/cxgbe/cxgbe_compat.h +++ b/drivers/net/cxgbe/cxgbe_compat.h @@ -198,7 +198,7 @@ static inline uint8_t hweight32(uint32_t word32) */ static inline int cxgbe_fls(int x) { - return x ? sizeof(x) * 8 - __builtin_clz(x) : 0; + return x ? 
sizeof(x) * 8 - rte_clz32(x) : 0; } static inline unsigned long ilog2(unsigned long n) diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c index 45bbeaef0ce..8cc3d9f257a 100644 --- a/drivers/net/cxgbe/cxgbe_ethdev.c +++ b/drivers/net/cxgbe/cxgbe_ethdev.c @@ -414,6 +414,7 @@ int cxgbe_dev_stop(struct rte_eth_dev *eth_dev) { struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; + uint16_t i; CXGBE_FUNC_TRACE(); @@ -429,6 +430,11 @@ int cxgbe_dev_stop(struct rte_eth_dev *eth_dev) t4_sge_eth_clear_queues(pi); eth_dev->data->scattered_rx = 0; + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) + eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c index a6c86113d12..ef4c06db6a4 100644 --- a/drivers/net/dpaa/dpaa_ethdev.c +++ b/drivers/net/dpaa/dpaa_ethdev.c @@ -399,6 +399,7 @@ static void dpaa_interrupt_handler(void *param) static int dpaa_eth_dev_start(struct rte_eth_dev *dev) { struct dpaa_if *dpaa_intf = dev->data->dev_private; + uint16_t i; PMD_INIT_FUNC_TRACE(); @@ -413,12 +414,18 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev) fman_if_enable_rx(dev->process_private); + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; } static int dpaa_eth_dev_stop(struct rte_eth_dev *dev) { struct fman_if *fif = dev->process_private; + uint16_t i; PMD_INIT_FUNC_TRACE(); dev->data->dev_started = 0; @@ -427,6 +434,11 @@ static int dpaa_eth_dev_stop(struct rte_eth_dev *dev) fman_if_disable_rx(fif); dev->tx_pkt_burst = dpaa_eth_tx_drop_all; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c index 679f33ae1a0..8e610b6bba3 100644 --- a/drivers/net/dpaa2/dpaa2_ethdev.c +++ b/drivers/net/dpaa2/dpaa2_ethdev.c @@ -1278,6 +1278,11 @@ dpaa2_dev_start(struct rte_eth_dev *dev) if (priv->en_ordered) dev->tx_pkt_burst = dpaa2_dev_tx_ordered; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; } @@ -1295,6 +1300,7 @@ dpaa2_dev_stop(struct rte_eth_dev *dev) struct rte_device *rdev = dev->device; struct rte_intr_handle *intr_handle; struct rte_dpaa2_device *dpaa2_dev; + uint16_t i; dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); intr_handle = dpaa2_dev->intr_handle; @@ -1329,6 +1335,11 @@ dpaa2_dev_stop(struct rte_eth_dev *dev) memset(&link, 0, sizeof(link)); rte_eth_linkstatus_set(dev, &link); + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c index 85910bbd8f0..23f7c4132d9 100644 --- a/drivers/net/dpaa2/dpaa2_rxtx.c +++ 
b/drivers/net/dpaa2/dpaa2_rxtx.c @@ -471,8 +471,7 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, sge = &sgt[i]; /*Resetting the buffer pool id and offset field*/ sge->fin_bpid_offset = 0; - DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg)); - DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off); + DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(cur_seg)); sge->length = cur_seg->data_len; if (RTE_MBUF_DIRECT(cur_seg)) { /* if we are using inline SGT in same buffers diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c index cb5ce2307b3..df5fbb78236 100644 --- a/drivers/net/e1000/em_rxtx.c +++ b/drivers/net/e1000/em_rxtx.c @@ -1576,6 +1576,8 @@ em_dev_clear_queues(struct rte_eth_dev *dev) em_tx_queue_release_mbufs(txq); em_reset_tx_queue(txq); } + + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } for (i = 0; i < dev->data->nb_rx_queues; i++) { @@ -1584,6 +1586,8 @@ em_dev_clear_queues(struct rte_eth_dev *dev) em_rx_queue_release_mbufs(rxq); em_reset_rx_queue(rxq); } + + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } } @@ -1812,6 +1816,8 @@ eth_em_rx_init(struct rte_eth_dev *dev) rxdctl |= E1000_RXDCTL_GRAN; E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + /* * Due to EM devices not having any sort of hardware * limit for packet length, jumbo frame of any size @@ -1946,6 +1952,8 @@ eth_em_tx_init(struct rte_eth_dev *dev) txdctl |= (txq->wthresh & 0x3F) << 16; txdctl |= E1000_TXDCTL_GRAN; E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); + + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; } /* Program the Transmit Control Register. */ diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c index 61c6394310b..448c4b7d9d0 100644 --- a/drivers/net/e1000/igb_rxtx.c +++ b/drivers/net/e1000/igb_rxtx.c @@ -2745,6 +2745,8 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev) else rxdctl |= ((rxq->wthresh & 0x1F) << 16); E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); + + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; } if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) { @@ -2816,6 +2818,8 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev) txdctl |= ((txq->wthresh & 0x1F) << 16); txdctl |= E1000_TXDCTL_QUEUE_ENABLE; E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); + + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; } } diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c index efcb163027c..7345e480f88 100644 --- a/drivers/net/ena/ena_ethdev.c +++ b/drivers/net/ena/ena_ethdev.c @@ -1171,6 +1171,7 @@ static int ena_start(struct rte_eth_dev *dev) struct ena_adapter *adapter = dev->data->dev_private; uint64_t ticks; int rc = 0; + uint16_t i; /* Cannot allocate memory in secondary process */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { @@ -1208,6 +1209,11 @@ static int ena_start(struct rte_eth_dev *dev) ++adapter->dev_stats.dev_start; adapter->state = ENA_ADAPTER_STATE_RUNNING; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; err_rss_init: @@ -1223,6 +1229,7 @@ static int ena_stop(struct rte_eth_dev *dev) struct ena_com_dev *ena_dev = &adapter->ena_dev; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = pci_dev->intr_handle; + uint16_t i; int rc; /* Cannot free memory in secondary process */ @@ -1254,6 +1261,11 
@@ static int ena_stop(struct rte_eth_dev *dev) adapter->state = ENA_ADAPTER_STATE_STOPPED; dev->data->dev_started = 0; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c index 1b4337bc488..c9352f07468 100644 --- a/drivers/net/enetc/enetc_ethdev.c +++ b/drivers/net/enetc/enetc_ethdev.c @@ -17,6 +17,7 @@ enetc_dev_start(struct rte_eth_dev *dev) ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct enetc_hw *enetc_hw = &hw->hw; uint32_t val; + uint16_t i; PMD_INIT_FUNC_TRACE(); if (hw->device_id == ENETC_DEV_ID_VF) @@ -45,6 +46,11 @@ enetc_dev_start(struct rte_eth_dev *dev) ENETC_PM0_IFM_XGMII); } + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; } @@ -55,6 +61,7 @@ enetc_dev_stop(struct rte_eth_dev *dev) ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct enetc_hw *enetc_hw = &hw->hw; uint32_t val; + uint16_t i; PMD_INIT_FUNC_TRACE(); dev->data->dev_started = 0; @@ -69,6 +76,11 @@ enetc_dev_stop(struct rte_eth_dev *dev) enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN))); + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c index cdf09155919..a487256fa15 100644 --- a/drivers/net/enic/enic_ethdev.c +++ b/drivers/net/enic/enic_ethdev.c @@ -368,6 +368,7 @@ static int enicpmd_dev_stop(struct rte_eth_dev *eth_dev) { struct rte_eth_link link; struct enic *enic = pmd_priv(eth_dev); + uint16_t i; if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; @@ -378,6 +379,11 @@ static int enicpmd_dev_stop(struct rte_eth_dev *eth_dev) memset(&link, 0, sizeof(link)); rte_eth_linkstatus_set(eth_dev, &link); + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) + eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/enic/meson.build b/drivers/net/enic/meson.build index 77dcd9e7ec7..8700ae27f28 100644 --- a/drivers/net/enic/meson.build +++ b/drivers/net/enic/meson.build @@ -28,14 +28,10 @@ sources = files( deps += ['hash'] includes += include_directories('base') -# The current implementation assumes 64-bit pointers -if cc.get_define('__AVX2__', args: machine_args) != '' and dpdk_conf.get('RTE_ARCH_64') - sources += files('enic_rxtx_vec_avx2.c') -# Build the avx2 handler if the compiler supports it, even though 'machine' -# does not. This is to support users who build for the min supported machine +# Build the avx2 handler for 64-bit X86 targets, even though 'machine' +# may not. This is to support users who build for the min supported machine # and need to run the binary on newer CPUs too. 
-# This part is from i40e meson.build -elif cc.has_argument('-mavx2') and dpdk_conf.get('RTE_ARCH_64') +if dpdk_conf.has('RTE_ARCH_X86_64') enic_avx2_lib = static_library('enic_avx2_lib', 'enic_rxtx_vec_avx2.c', dependencies: [static_rte_ethdev, static_rte_bus_pci], diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c index dfc4abe3e39..2b6914b1da3 100644 --- a/drivers/net/fm10k/fm10k_rxtx_vec.c +++ b/drivers/net/fm10k/fm10k_rxtx_vec.c @@ -565,7 +565,7 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]); /* C.4 calc available number of desc */ - var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + var = rte_popcount64(_mm_cvtsi128_si64(staterr)); nb_pkts_recd += var; if (likely(var != RTE_FM10K_DESCS_PER_LOOP)) break; diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c index aa75abe1020..b441f966239 100644 --- a/drivers/net/gve/gve_ethdev.c +++ b/drivers/net/gve/gve_ethdev.c @@ -7,6 +7,7 @@ #include "base/gve_register.h" #include "base/gve_osdep.h" #include "gve_version.h" +#include "rte_ether.h" static void gve_write_version(uint8_t *driver_version_register) @@ -297,8 +298,8 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_tx_queues = priv->max_nb_txq; dev_info->min_rx_bufsize = GVE_MIN_BUF_SIZE; dev_info->max_rx_pktlen = GVE_MAX_RX_PKTLEN; - dev_info->max_mtu = GVE_MAX_MTU; - dev_info->min_mtu = GVE_MIN_MTU; + dev_info->max_mtu = priv->max_mtu; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; dev_info->rx_offload_capa = 0; dev_info->tx_offload_capa = @@ -606,53 +607,17 @@ gve_teardown_device_resources(struct gve_priv *priv) gve_clear_device_resources_ok(priv); } -static uint8_t -pci_dev_find_capability(struct rte_pci_device *pdev, int cap) -{ - uint8_t pos, id; - uint16_t ent; - int loops; - int ret; - - ret = rte_pci_read_config(pdev, &pos, sizeof(pos), PCI_CAPABILITY_LIST); - if (ret != sizeof(pos)) - return 0; - - loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF; - - while (pos && loops--) { - ret = rte_pci_read_config(pdev, &ent, sizeof(ent), pos); - if (ret != sizeof(ent)) - return 0; - - id = ent & 0xff; - if (id == 0xff) - break; - - if (id == cap) - return pos; - - pos = (ent >> 8); - } - - return 0; -} - static int pci_dev_msix_vec_count(struct rte_pci_device *pdev) { - uint8_t msix_cap = pci_dev_find_capability(pdev, PCI_CAP_ID_MSIX); + off_t msix_pos = rte_pci_find_capability(pdev, RTE_PCI_CAP_ID_MSIX); uint16_t control; - int ret; - if (!msix_cap) - return 0; - - ret = rte_pci_read_config(pdev, &control, sizeof(control), msix_cap + PCI_MSIX_FLAGS); - if (ret != sizeof(control)) - return 0; + if (msix_pos > 0 && rte_pci_read_config(pdev, &control, sizeof(control), + msix_pos + RTE_PCI_MSIX_FLAGS) == sizeof(control)) + return (control & RTE_PCI_MSIX_FLAGS_QSIZE) + 1; - return (control & PCI_MSIX_FLAGS_QSIZE) + 1; + return 0; } static int diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h index c9bcfa553ce..1cba282128c 100644 --- a/drivers/net/gve/gve_ethdev.h +++ b/drivers/net/gve/gve_ethdev.h @@ -8,26 +8,14 @@ #include #include #include +#include #include "base/gve.h" /* TODO: this is a workaround to ensure that Tx complq is enough */ #define DQO_TX_MULTIPLIER 4 -/* - * Following macros are derived from linux/pci_regs.h, however, - * we can't simply include that header here, as there is no such - * file for non-Linux platform. 
- */ -#define PCI_CFG_SPACE_SIZE 256 -#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */ -#define PCI_STD_HEADER_SIZEOF 64 -#define PCI_CAP_SIZEOF 4 -#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */ -#define PCI_MSIX_FLAGS 2 /* Message Control */ -#define PCI_MSIX_FLAGS_QSIZE 0x07FF /* Table size */ - -#define GVE_DEFAULT_RX_FREE_THRESH 512 +#define GVE_DEFAULT_RX_FREE_THRESH 64 #define GVE_DEFAULT_TX_FREE_THRESH 32 #define GVE_DEFAULT_TX_RS_THRESH 32 #define GVE_TX_MAX_FREE_SZ 512 @@ -35,9 +23,6 @@ #define GVE_MIN_BUF_SIZE 1024 #define GVE_MAX_RX_PKTLEN 65535 -#define GVE_MAX_MTU RTE_ETHER_MTU -#define GVE_MIN_MTU RTE_ETHER_MIN_MTU - #define GVE_TX_CKSUM_OFFLOAD_MASK ( \ RTE_MBUF_F_TX_L4_MASK | \ RTE_MBUF_F_TX_TCP_SEG) diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c index 236aefd2a80..7e7ddac48ee 100644 --- a/drivers/net/gve/gve_rx_dqo.c +++ b/drivers/net/gve/gve_rx_dqo.c @@ -12,8 +12,8 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq) { volatile struct gve_rx_desc_dqo *rx_buf_ring; volatile struct gve_rx_desc_dqo *rx_buf_desc; - struct rte_mbuf *nmb[rxq->free_thresh]; - uint16_t nb_refill = rxq->free_thresh; + struct rte_mbuf *nmb[rxq->nb_rx_hold]; + uint16_t nb_refill = rxq->nb_rx_hold; uint16_t nb_desc = rxq->nb_rx_desc; uint16_t next_avail = rxq->bufq_tail; struct rte_eth_dev *dev; diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c index 7aa5e7d8e92..adc9f75c81a 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.c +++ b/drivers/net/hinic/hinic_pmd_ethdev.c @@ -980,6 +980,7 @@ static int hinic_dev_start(struct rte_eth_dev *dev) int rc; char *name; struct hinic_nic_dev *nic_dev; + uint16_t i; nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); name = dev->data->name; @@ -1047,6 +1048,11 @@ static int hinic_dev_start(struct rte_eth_dev *dev) rte_bit_relaxed_set32(HINIC_DEV_START, &nic_dev->dev_status); + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; en_port_fail: @@ -1169,6 +1175,7 @@ static int hinic_dev_stop(struct rte_eth_dev *dev) uint16_t port_id; struct hinic_nic_dev *nic_dev; struct rte_eth_link link; + uint16_t i; nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); name = dev->data->name; @@ -1215,6 +1222,11 @@ static int hinic_dev_stop(struct rte_eth_dev *dev) hinic_free_all_rx_mbuf(dev); hinic_free_all_tx_mbuf(dev); + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/hns3/hns3_common.c b/drivers/net/hns3/hns3_common.c index a11ea686fd0..c4d47f43fe4 100644 --- a/drivers/net/hns3/hns3_common.c +++ b/drivers/net/hns3/hns3_common.c @@ -386,7 +386,7 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw, hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " "invalid. 
valid range: 0~%d", nb_mc_addr, HNS3_MC_MACADDR_NUM); - return -EINVAL; + return -ENOSPC; } /* Check if input mac addresses are valid */ @@ -444,6 +444,7 @@ hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, uint32_t nb_mc_addr) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); struct rte_ether_addr *addr; int cur_addr_num; int set_addr_num; @@ -451,6 +452,15 @@ hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, int ret; int i; + if (mc_addr_set == NULL || nb_mc_addr == 0) { + rte_spinlock_lock(&hw->lock); + ret = hns3_configure_all_mc_mac_addr(hns, true); + if (ret == 0) + hw->mc_addrs_num = 0; + rte_spinlock_unlock(&hw->lock); + return ret; + } + /* Check if input parameters are valid */ ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); if (ret) diff --git a/drivers/net/hns3/hns3_dump.c b/drivers/net/hns3/hns3_dump.c index c0839380ea8..5c21ff0a331 100644 --- a/drivers/net/hns3/hns3_dump.c +++ b/drivers/net/hns3/hns3_dump.c @@ -664,10 +664,10 @@ hns3_get_tm_conf_shaper_info(FILE *file, struct hns3_tm_conf *conf) if (conf->nb_shaper_profile == 0) return; - fprintf(file, " shaper_profile:\n"); + fprintf(file, "\t -- shaper_profile:\n"); TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) { fprintf(file, - " id=%u reference_count=%u peak_rate=%" PRIu64 "Bps\n", + "\t id=%u reference_count=%u peak_rate=%" PRIu64 "Bps\n", shaper_profile->shaper_profile_id, shaper_profile->reference_count, shaper_profile->profile.peak.rate); @@ -681,8 +681,8 @@ hns3_get_tm_conf_port_node_info(FILE *file, struct hns3_tm_conf *conf) return; fprintf(file, - " port_node:\n" - " node_id=%u reference_count=%u shaper_profile_id=%d\n", + "\t -- port_node:\n" + "\t node_id=%u reference_count=%u shaper_profile_id=%d\n", conf->root->id, conf->root->reference_count, conf->root->shaper_profile ? 
(int)conf->root->shaper_profile->shaper_profile_id : -1); @@ -699,7 +699,7 @@ hns3_get_tm_conf_tc_node_info(FILE *file, struct hns3_tm_conf *conf) if (conf->nb_tc_node == 0) return; - fprintf(file, " tc_node:\n"); + fprintf(file, "\t -- tc_node:\n"); memset(tc_node, 0, sizeof(tc_node)); TAILQ_FOREACH(tm_node, tc_list, node) { tidx = hns3_tm_calc_node_tc_no(conf, tm_node->id); @@ -712,7 +712,7 @@ hns3_get_tm_conf_tc_node_info(FILE *file, struct hns3_tm_conf *conf) if (tm_node == NULL) continue; fprintf(file, - " id=%u TC%u reference_count=%u parent_id=%d " + "\t id=%u TC%u reference_count=%u parent_id=%d " "shaper_profile_id=%d\n", tm_node->id, hns3_tm_calc_node_tc_no(conf, tm_node->id), tm_node->reference_count, @@ -738,7 +738,7 @@ hns3_get_tm_conf_queue_format_info(FILE *file, struct hns3_tm_node **queue_node, end_queue_id = (i + 1) * HNS3_PERLINE_QUEUES - 1; if (end_queue_id > nb_tx_queues - 1) end_queue_id = nb_tx_queues - 1; - fprintf(file, " %04u - %04u | ", start_queue_id, + fprintf(file, "\t %04u - %04u | ", start_queue_id, end_queue_id); for (j = start_queue_id; j < nb_tx_queues; j++) { if (j >= end_queue_id + 1) @@ -767,8 +767,8 @@ hns3_get_tm_conf_queue_node_info(FILE *file, struct hns3_tm_conf *conf, return; fprintf(file, - " queue_node:\n" - " tx queue id | mapped tc (8 mean node not exist)\n"); + "\t -- queue_node:\n" + "\t tx queue id | mapped tc (8 mean node not exist)\n"); memset(queue_node, 0, sizeof(queue_node)); memset(queue_node_tc, 0, sizeof(queue_node_tc)); @@ -918,6 +918,8 @@ hns3_eth_dev_priv_dump(struct rte_eth_dev *dev, FILE *file) struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; + rte_spinlock_lock(&hw->lock); + hns3_get_device_basic_info(file, dev); hns3_get_dev_feature_capability(file, hw); hns3_get_rxtx_queue_info(file, dev); @@ -927,8 +929,10 @@ hns3_eth_dev_priv_dump(struct rte_eth_dev *dev, FILE *file) * VF only supports dumping basic info, feature capability and queue * info. */ - if (hns->is_vf) + if (hns->is_vf) { + rte_spinlock_unlock(&hw->lock); return 0; + } hns3_get_dev_mac_info(file, hns); hns3_get_vlan_config_info(file, hw); @@ -936,6 +940,8 @@ hns3_eth_dev_priv_dump(struct rte_eth_dev *dev, FILE *file) hns3_get_tm_conf_info(file, dev); hns3_get_flow_ctrl_info(file, dev); + rte_spinlock_unlock(&hw->lock); + return 0; } diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 95c72e86aaf..964f47f1641 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -5936,7 +5936,7 @@ hns3_fec_get_capability(struct rte_eth_dev *dev, speed_capa = hns3_get_speed_capa(hw); /* speed_num counts number of speed capabilities */ - speed_num = __builtin_popcount(speed_capa & HNS3_SPEEDS_SUPP_FEC); + speed_num = rte_popcount32(speed_capa & HNS3_SPEEDS_SUPP_FEC); if (speed_num == 0) return -ENOTSUP; @@ -6143,7 +6143,7 @@ hns3_fec_mode_valid(struct rte_eth_dev *dev, uint32_t mode) struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); uint32_t cur_capa; - if (__builtin_popcount(mode) != 1) { + if (rte_popcount32(mode) != 1) { hns3_err(hw, "FEC mode(0x%x) should be only one bit set", mode); return -EINVAL; } diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index 5aac62a41fc..007f5d619fb 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -2,7 +2,6 @@ * Copyright(c) 2018-2021 HiSilicon Limited. 
*/ -#include #include #include #include @@ -49,115 +48,35 @@ static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw, static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete); -/* set PCI bus mastering */ static int -hns3vf_set_bus_master(const struct rte_pci_device *device, bool op) -{ - uint16_t reg; - int ret; - - ret = rte_pci_read_config(device, ®, sizeof(reg), PCI_COMMAND); - if (ret < 0) { - PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", - PCI_COMMAND); - return ret; - } - - if (op) - /* set the master bit */ - reg |= PCI_COMMAND_MASTER; - else - reg &= ~(PCI_COMMAND_MASTER); - - return rte_pci_write_config(device, ®, sizeof(reg), PCI_COMMAND); -} - -/** - * hns3vf_find_pci_capability - lookup a capability in the PCI capability list - * @cap: the capability - * - * Return the address of the given capability within the PCI capability list. - */ -static int -hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap) +hns3vf_enable_msix(const struct rte_pci_device *device, bool op) { -#define MAX_PCIE_CAPABILITY 48 - uint16_t status; - uint8_t pos; - uint8_t id; - int ttl; + uint16_t control; + off_t pos; int ret; - ret = rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS); - if (ret < 0) { - PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_STATUS); - return 0; - } - - if (!(status & PCI_STATUS_CAP_LIST)) - return 0; - - ttl = MAX_PCIE_CAPABILITY; - ret = rte_pci_read_config(device, &pos, sizeof(pos), - PCI_CAPABILITY_LIST); - if (ret < 0) { - PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", - PCI_CAPABILITY_LIST); + if (!rte_pci_has_capability_list(device)) { + PMD_INIT_LOG(ERR, "Failed to read PCI capability list"); return 0; } - while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) { - ret = rte_pci_read_config(device, &id, sizeof(id), - (pos + PCI_CAP_LIST_ID)); - if (ret < 0) { - PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", - (pos + PCI_CAP_LIST_ID)); - break; - } - - if (id == 0xFF) - break; - - if (id == cap) - return (int)pos; - - ret = rte_pci_read_config(device, &pos, sizeof(pos), - (pos + PCI_CAP_LIST_NEXT)); - if (ret < 0) { - PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", - (pos + PCI_CAP_LIST_NEXT)); - break; - } - } - return 0; -} - -static int -hns3vf_enable_msix(const struct rte_pci_device *device, bool op) -{ - uint16_t control; - int pos; - int ret; - - pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX); - if (pos) { + pos = rte_pci_find_capability(device, RTE_PCI_CAP_ID_MSIX); + if (pos > 0) { ret = rte_pci_read_config(device, &control, sizeof(control), - (pos + PCI_MSIX_FLAGS)); + pos + RTE_PCI_MSIX_FLAGS); if (ret < 0) { - PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", - (pos + PCI_MSIX_FLAGS)); + PMD_INIT_LOG(ERR, "Failed to read MSIX flags"); return -ENXIO; } if (op) - control |= PCI_MSIX_FLAGS_ENABLE; + control |= RTE_PCI_MSIX_FLAGS_ENABLE; else - control &= ~PCI_MSIX_FLAGS_ENABLE; + control &= ~RTE_PCI_MSIX_FLAGS_ENABLE; ret = rte_pci_write_config(device, &control, sizeof(control), - (pos + PCI_MSIX_FLAGS)); + pos + RTE_PCI_MSIX_FLAGS); if (ret < 0) { - PMD_INIT_LOG(ERR, "failed to write PCI offset 0x%x", - (pos + PCI_MSIX_FLAGS)); + PMD_INIT_LOG(ERR, "failed to write MSIX flags"); return -ENXIO; } @@ -250,6 +169,8 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, hns3_err(hw, "Failed to set mac addr(%s) for vf: %d", mac_str, ret); } + rte_spinlock_unlock(&hw->lock); + return ret; } rte_ether_addr_copy(mac_addr, @@ -2140,7 +2061,7 @@ 
hns3vf_reinit_dev(struct hns3_adapter *hns) if (hw->reset.level == HNS3_VF_FULL_RESET) { rte_intr_disable(pci_dev->intr_handle); - ret = hns3vf_set_bus_master(pci_dev, true); + ret = rte_pci_set_bus_master(pci_dev, true); if (ret < 0) { hns3_err(hw, "failed to set pci bus, ret = %d", ret); return ret; diff --git a/drivers/net/hns3/hns3_rxtx_vec.c b/drivers/net/hns3/hns3_rxtx_vec.c index cd9264d91bb..9708ec614e0 100644 --- a/drivers/net/hns3/hns3_rxtx_vec.c +++ b/drivers/net/hns3/hns3_rxtx_vec.c @@ -55,57 +55,6 @@ hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) return nb_tx; } -static inline void -hns3_rxq_rearm_mbuf(struct hns3_rx_queue *rxq) -{ -#define REARM_LOOP_STEP_NUM 4 - struct hns3_entry *rxep = &rxq->sw_ring[rxq->rx_rearm_start]; - struct hns3_desc *rxdp = rxq->rx_ring + rxq->rx_rearm_start; - uint64_t dma_addr; - int i; - - if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep, - HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) { - rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; - return; - } - - for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM, - rxep += REARM_LOOP_STEP_NUM, rxdp += REARM_LOOP_STEP_NUM) { - if (likely(i < - HNS3_DEFAULT_RXQ_REARM_THRESH - REARM_LOOP_STEP_NUM)) { - rte_prefetch_non_temporal(rxep[4].mbuf); - rte_prefetch_non_temporal(rxep[5].mbuf); - rte_prefetch_non_temporal(rxep[6].mbuf); - rte_prefetch_non_temporal(rxep[7].mbuf); - } - - dma_addr = rte_mbuf_data_iova_default(rxep[0].mbuf); - rxdp[0].addr = rte_cpu_to_le_64(dma_addr); - rxdp[0].rx.bd_base_info = 0; - - dma_addr = rte_mbuf_data_iova_default(rxep[1].mbuf); - rxdp[1].addr = rte_cpu_to_le_64(dma_addr); - rxdp[1].rx.bd_base_info = 0; - - dma_addr = rte_mbuf_data_iova_default(rxep[2].mbuf); - rxdp[2].addr = rte_cpu_to_le_64(dma_addr); - rxdp[2].rx.bd_base_info = 0; - - dma_addr = rte_mbuf_data_iova_default(rxep[3].mbuf); - rxdp[3].addr = rte_cpu_to_le_64(dma_addr); - rxdp[3].rx.bd_base_info = 0; - } - - rxq->rx_rearm_start += HNS3_DEFAULT_RXQ_REARM_THRESH; - if (rxq->rx_rearm_start >= rxq->nb_rx_desc) - rxq->rx_rearm_start = 0; - - rxq->rx_rearm_nb -= HNS3_DEFAULT_RXQ_REARM_THRESH; - - hns3_write_reg_opt(rxq->io_head_reg, HNS3_DEFAULT_RXQ_REARM_THRESH); -} - uint16_t hns3_recv_pkts_vec(void *__restrict rx_queue, struct rte_mbuf **__restrict rx_pkts, diff --git a/drivers/net/hns3/hns3_rxtx_vec.h b/drivers/net/hns3/hns3_rxtx_vec.h index 2c8a91921e7..a9a6774294e 100644 --- a/drivers/net/hns3/hns3_rxtx_vec.h +++ b/drivers/net/hns3/hns3_rxtx_vec.h @@ -94,4 +94,55 @@ hns3_rx_reassemble_pkts(struct rte_mbuf **rx_pkts, return count; } + +static inline void +hns3_rxq_rearm_mbuf(struct hns3_rx_queue *rxq) +{ +#define REARM_LOOP_STEP_NUM 4 + struct hns3_entry *rxep = &rxq->sw_ring[rxq->rx_rearm_start]; + struct hns3_desc *rxdp = rxq->rx_ring + rxq->rx_rearm_start; + uint64_t dma_addr; + int i; + + if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep, + HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) { + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + return; + } + + for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM, + rxep += REARM_LOOP_STEP_NUM, rxdp += REARM_LOOP_STEP_NUM) { + if (likely(i < + HNS3_DEFAULT_RXQ_REARM_THRESH - REARM_LOOP_STEP_NUM)) { + rte_prefetch_non_temporal(rxep[4].mbuf); + rte_prefetch_non_temporal(rxep[5].mbuf); + rte_prefetch_non_temporal(rxep[6].mbuf); + rte_prefetch_non_temporal(rxep[7].mbuf); + } + + dma_addr = rte_mbuf_data_iova_default(rxep[0].mbuf); + rxdp[0].addr = 
rte_cpu_to_le_64(dma_addr); + rxdp[0].rx.bd_base_info = 0; + + dma_addr = rte_mbuf_data_iova_default(rxep[1].mbuf); + rxdp[1].addr = rte_cpu_to_le_64(dma_addr); + rxdp[1].rx.bd_base_info = 0; + + dma_addr = rte_mbuf_data_iova_default(rxep[2].mbuf); + rxdp[2].addr = rte_cpu_to_le_64(dma_addr); + rxdp[2].rx.bd_base_info = 0; + + dma_addr = rte_mbuf_data_iova_default(rxep[3].mbuf); + rxdp[3].addr = rte_cpu_to_le_64(dma_addr); + rxdp[3].rx.bd_base_info = 0; + } + + rxq->rx_rearm_start += HNS3_DEFAULT_RXQ_REARM_THRESH; + if (rxq->rx_rearm_start >= rxq->nb_rx_desc) + rxq->rx_rearm_start = 0; + + rxq->rx_rearm_nb -= HNS3_DEFAULT_RXQ_REARM_THRESH; + + hns3_write_reg_opt(rxq->io_head_reg, HNS3_DEFAULT_RXQ_REARM_THRESH); +} #endif /* HNS3_RXTX_VEC_H */ diff --git a/drivers/net/hns3/hns3_rxtx_vec_neon.h b/drivers/net/hns3/hns3_rxtx_vec_neon.h index 564d831a485..0dc6b9f0a22 100644 --- a/drivers/net/hns3/hns3_rxtx_vec_neon.h +++ b/drivers/net/hns3/hns3_rxtx_vec_neon.h @@ -180,19 +180,12 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, bd_vld = vset_lane_u16(rxdp[2].rx.bdtype_vld_udp0, bd_vld, 2); bd_vld = vset_lane_u16(rxdp[3].rx.bdtype_vld_udp0, bd_vld, 3); - /* load 2 mbuf pointer */ - mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]); - bd_vld = vshl_n_u16(bd_vld, HNS3_UINT16_BIT - 1 - HNS3_RXD_VLD_B); bd_vld = vreinterpret_u16_s16( vshr_n_s16(vreinterpret_s16_u16(bd_vld), HNS3_UINT16_BIT - 1)); stat = ~vget_lane_u64(vreinterpret_u64_u16(bd_vld), 0); - - /* load 2 mbuf pointer again */ - mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]); - if (likely(stat == 0)) bd_valid_num = HNS3_DEFAULT_DESCS_PER_LOOP; else @@ -200,20 +193,20 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, if (bd_valid_num == 0) break; - /* use offset to control below data load oper ordering */ - offset = rxq->offset_table[bd_valid_num]; + /* load 4 mbuf pointer */ + mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]); + mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]); - /* store 2 mbuf pointer into rx_pkts */ + /* store 4 mbuf pointer into rx_pkts */ vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1); + vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2); - /* read first two descs */ + /* use offset to control below data load oper ordering */ + offset = rxq->offset_table[bd_valid_num]; + + /* read 4 descs */ descs[0] = vld2q_u64((uint64_t *)(rxdp + offset)); descs[1] = vld2q_u64((uint64_t *)(rxdp + offset + 1)); - - /* store 2 mbuf pointer into rx_pkts again */ - vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2); - - /* read remains two descs */ descs[2] = vld2q_u64((uint64_t *)(rxdp + offset + 2)); descs[3] = vld2q_u64((uint64_t *)(rxdp + offset + 3)); @@ -221,56 +214,47 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, pkt_mbuf1.val[1] = vreinterpretq_u8_u64(descs[0].val[1]); pkt_mbuf2.val[0] = vreinterpretq_u8_u64(descs[1].val[0]); pkt_mbuf2.val[1] = vreinterpretq_u8_u64(descs[1].val[1]); + pkt_mbuf3.val[0] = vreinterpretq_u8_u64(descs[2].val[0]); + pkt_mbuf3.val[1] = vreinterpretq_u8_u64(descs[2].val[1]); + pkt_mbuf4.val[0] = vreinterpretq_u8_u64(descs[3].val[0]); + pkt_mbuf4.val[1] = vreinterpretq_u8_u64(descs[3].val[1]); - /* pkt 1,2 convert format from desc to pktmbuf */ + /* 4 packets convert format from desc to pktmbuf */ pkt_mb1 = vqtbl2q_u8(pkt_mbuf1, shuf_desc_fields_msk); pkt_mb2 = vqtbl2q_u8(pkt_mbuf2, shuf_desc_fields_msk); + pkt_mb3 = vqtbl2q_u8(pkt_mbuf3, shuf_desc_fields_msk); + pkt_mb4 = vqtbl2q_u8(pkt_mbuf4, shuf_desc_fields_msk); - /* store the first 8 bytes of pkt 1,2 mbuf's rearm_data */ - *(uint64_t 
*)&sw_ring[pos + 0].mbuf->rearm_data = - rxq->mbuf_initializer; - *(uint64_t *)&sw_ring[pos + 1].mbuf->rearm_data = - rxq->mbuf_initializer; - - /* pkt 1,2 remove crc */ + /* 4 packets remove crc */ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust); pkt_mb1 = vreinterpretq_u8_u16(tmp); tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust); pkt_mb2 = vreinterpretq_u8_u16(tmp); + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust); + pkt_mb3 = vreinterpretq_u8_u16(tmp); + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust); + pkt_mb4 = vreinterpretq_u8_u16(tmp); - pkt_mbuf3.val[0] = vreinterpretq_u8_u64(descs[2].val[0]); - pkt_mbuf3.val[1] = vreinterpretq_u8_u64(descs[2].val[1]); - pkt_mbuf4.val[0] = vreinterpretq_u8_u64(descs[3].val[0]); - pkt_mbuf4.val[1] = vreinterpretq_u8_u64(descs[3].val[1]); - - /* pkt 3,4 convert format from desc to pktmbuf */ - pkt_mb3 = vqtbl2q_u8(pkt_mbuf3, shuf_desc_fields_msk); - pkt_mb4 = vqtbl2q_u8(pkt_mbuf4, shuf_desc_fields_msk); - - /* pkt 1,2 save to rx_pkts mbuf */ + /* save packet info to rx_pkts mbuf */ vst1q_u8((void *)&sw_ring[pos + 0].mbuf->rx_descriptor_fields1, pkt_mb1); vst1q_u8((void *)&sw_ring[pos + 1].mbuf->rx_descriptor_fields1, pkt_mb2); + vst1q_u8((void *)&sw_ring[pos + 2].mbuf->rx_descriptor_fields1, + pkt_mb3); + vst1q_u8((void *)&sw_ring[pos + 3].mbuf->rx_descriptor_fields1, + pkt_mb4); - /* pkt 3,4 remove crc */ - tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust); - pkt_mb3 = vreinterpretq_u8_u16(tmp); - tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust); - pkt_mb4 = vreinterpretq_u8_u16(tmp); - - /* store the first 8 bytes of pkt 3,4 mbuf's rearm_data */ + /* store the first 8 bytes of packets mbuf's rearm_data */ + *(uint64_t *)&sw_ring[pos + 0].mbuf->rearm_data = + rxq->mbuf_initializer; + *(uint64_t *)&sw_ring[pos + 1].mbuf->rearm_data = + rxq->mbuf_initializer; *(uint64_t *)&sw_ring[pos + 2].mbuf->rearm_data = rxq->mbuf_initializer; *(uint64_t *)&sw_ring[pos + 3].mbuf->rearm_data = rxq->mbuf_initializer; - /* pkt 3,4 save to rx_pkts mbuf */ - vst1q_u8((void *)&sw_ring[pos + 2].mbuf->rx_descriptor_fields1, - pkt_mb3); - vst1q_u8((void *)&sw_ring[pos + 3].mbuf->rx_descriptor_fields1, - pkt_mb4); - rte_prefetch_non_temporal(rxdp + HNS3_DEFAULT_DESCS_PER_LOOP); parse_retcode = hns3_desc_parse_field(rxq, &sw_ring[pos], diff --git a/drivers/net/hns3/hns3_rxtx_vec_sve.c b/drivers/net/hns3/hns3_rxtx_vec_sve.c index 8bfc3de0492..8aa4448558c 100644 --- a/drivers/net/hns3/hns3_rxtx_vec_sve.c +++ b/drivers/net/hns3/hns3_rxtx_vec_sve.c @@ -20,40 +20,36 @@ #define BD_SIZE 32 #define BD_FIELD_ADDR_OFFSET 0 -#define BD_FIELD_L234_OFFSET 8 -#define BD_FIELD_XLEN_OFFSET 12 -#define BD_FIELD_RSS_OFFSET 16 -#define BD_FIELD_OL_OFFSET 24 #define BD_FIELD_VALID_OFFSET 28 -typedef struct { - uint32_t l234_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP]; - uint32_t ol_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP]; - uint32_t bd_base_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP]; -} HNS3_SVE_KEY_FIELD_S; - static inline uint32_t hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq, struct rte_mbuf **rx_pkts, - HNS3_SVE_KEY_FIELD_S *key, + struct hns3_desc *rxdp, uint32_t bd_vld_num) { + uint32_t l234_info, ol_info, bd_base_info; uint32_t retcode = 0; int ret, i; for (i = 0; i < (int)bd_vld_num; i++) { /* init rte_mbuf.rearm_data last 64-bit */ rx_pkts[i]->ol_flags = RTE_MBUF_F_RX_RSS_HASH; - - ret = hns3_handle_bdinfo(rxq, rx_pkts[i], key->bd_base_info[i], - key->l234_info[i]); + rx_pkts[i]->hash.rss = rxdp[i].rx.rss_hash; + 
rx_pkts[i]->pkt_len = rte_le_to_cpu_16(rxdp[i].rx.pkt_len) - + rxq->crc_len; + rx_pkts[i]->data_len = rx_pkts[i]->pkt_len; + + l234_info = rxdp[i].rx.l234_info; + ol_info = rxdp[i].rx.ol_info; + bd_base_info = rxdp[i].rx.bd_base_info; + ret = hns3_handle_bdinfo(rxq, rx_pkts[i], bd_base_info, l234_info); if (unlikely(ret)) { retcode |= 1u << i; continue; } - rx_pkts[i]->packet_type = hns3_rx_calc_ptype(rxq, - key->l234_info[i], key->ol_info[i]); + rx_pkts[i]->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info); /* Increment bytes counter */ rxq->basic_stats.bytes += rx_pkts[i]->pkt_len; @@ -77,46 +73,16 @@ hns3_recv_burst_vec_sve(struct hns3_rx_queue *__restrict rxq, uint16_t nb_pkts, uint64_t *bd_err_mask) { -#define XLEN_ADJUST_LEN 32 -#define RSS_ADJUST_LEN 16 -#define GEN_VLD_U8_ZIP_INDEX svindex_s8(28, -4) uint16_t rx_id = rxq->next_to_use; struct hns3_entry *sw_ring = &rxq->sw_ring[rx_id]; struct hns3_desc *rxdp = &rxq->rx_ring[rx_id]; - struct hns3_desc *rxdp2; - HNS3_SVE_KEY_FIELD_S key_field; + struct hns3_desc *rxdp2, *next_rxdp; uint64_t bd_valid_num; uint32_t parse_retcode; uint16_t nb_rx = 0; int pos, offset; - uint16_t xlen_adjust[XLEN_ADJUST_LEN] = { - 0, 0xffff, 1, 0xffff, /* 1st mbuf: pkt_len and dat_len */ - 2, 0xffff, 3, 0xffff, /* 2st mbuf: pkt_len and dat_len */ - 4, 0xffff, 5, 0xffff, /* 3st mbuf: pkt_len and dat_len */ - 6, 0xffff, 7, 0xffff, /* 4st mbuf: pkt_len and dat_len */ - 8, 0xffff, 9, 0xffff, /* 5st mbuf: pkt_len and dat_len */ - 10, 0xffff, 11, 0xffff, /* 6st mbuf: pkt_len and dat_len */ - 12, 0xffff, 13, 0xffff, /* 7st mbuf: pkt_len and dat_len */ - 14, 0xffff, 15, 0xffff, /* 8st mbuf: pkt_len and dat_len */ - }; - - uint32_t rss_adjust[RSS_ADJUST_LEN] = { - 0, 0xffff, /* 1st mbuf: rss */ - 1, 0xffff, /* 2st mbuf: rss */ - 2, 0xffff, /* 3st mbuf: rss */ - 3, 0xffff, /* 4st mbuf: rss */ - 4, 0xffff, /* 5st mbuf: rss */ - 5, 0xffff, /* 6st mbuf: rss */ - 6, 0xffff, /* 7st mbuf: rss */ - 7, 0xffff, /* 8st mbuf: rss */ - }; - svbool_t pg32 = svwhilelt_b32(0, HNS3_SVE_DEFAULT_DESCS_PER_LOOP); - svuint16_t xlen_tbl1 = svld1_u16(PG16_256BIT, xlen_adjust); - svuint16_t xlen_tbl2 = svld1_u16(PG16_256BIT, &xlen_adjust[16]); - svuint32_t rss_tbl1 = svld1_u32(PG32_256BIT, rss_adjust); - svuint32_t rss_tbl2 = svld1_u32(PG32_256BIT, &rss_adjust[8]); /* compile-time verifies the xlen_adjust mask */ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != @@ -126,30 +92,21 @@ hns3_recv_burst_vec_sve(struct hns3_rx_queue *__restrict rxq, for (pos = 0; pos < nb_pkts; pos += HNS3_SVE_DEFAULT_DESCS_PER_LOOP, rxdp += HNS3_SVE_DEFAULT_DESCS_PER_LOOP) { - svuint64_t vld_clz, mbp1st, mbp2st, mbuf_init; - svuint64_t xlen1st, xlen2st, rss1st, rss2st; - svuint32_t l234, ol, vld, vld2, xlen, rss; - svuint8_t vld_u8; + svuint64_t mbp1st, mbp2st, mbuf_init; + svuint32_t vld; + svbool_t vld_op; /* calc how many bd valid: part 1 */ vld = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp, svindex_u32(BD_FIELD_VALID_OFFSET, BD_SIZE)); - vld2 = svlsl_n_u32_z(pg32, vld, - HNS3_UINT32_BIT - 1 - HNS3_RXD_VLD_B); - vld2 = svreinterpret_u32_s32(svasr_n_s32_z(pg32, - svreinterpret_s32_u32(vld2), HNS3_UINT32_BIT - 1)); + vld = svand_n_u32_z(pg32, vld, BIT(HNS3_RXD_VLD_B)); + vld_op = svcmpne_n_u32(pg32, vld, BIT(HNS3_RXD_VLD_B)); + bd_valid_num = svcntp_b32(pg32, svbrkb_b_z(pg32, vld_op)); + if (bd_valid_num == 0) + break; /* load 4 mbuf pointer */ mbp1st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[pos]); - - /* calc how many bd valid: part 2 */ - vld_u8 = 
svtbl_u8(svreinterpret_u8_u32(vld2), - svreinterpret_u8_s8(GEN_VLD_U8_ZIP_INDEX)); - vld_clz = svnot_u64_z(PG64_64BIT, svreinterpret_u64_u8(vld_u8)); - vld_clz = svclz_u64_z(PG64_64BIT, vld_clz); - svst1_u64(PG64_64BIT, &bd_valid_num, vld_clz); - bd_valid_num /= HNS3_UINT8_BIT; - /* load 4 more mbuf pointer */ mbp2st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[pos + 4]); @@ -159,65 +116,25 @@ hns3_recv_burst_vec_sve(struct hns3_rx_queue *__restrict rxq, /* store 4 mbuf pointer into rx_pkts */ svst1_u64(PG64_256BIT, (uint64_t *)&rx_pkts[pos], mbp1st); - - /* load key field to vector reg */ - l234 = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2, - svindex_u32(BD_FIELD_L234_OFFSET, BD_SIZE)); - ol = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2, - svindex_u32(BD_FIELD_OL_OFFSET, BD_SIZE)); - /* store 4 mbuf pointer into rx_pkts again */ svst1_u64(PG64_256BIT, (uint64_t *)&rx_pkts[pos + 4], mbp2st); - /* load datalen, pktlen and rss_hash */ - xlen = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2, - svindex_u32(BD_FIELD_XLEN_OFFSET, BD_SIZE)); - rss = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2, - svindex_u32(BD_FIELD_RSS_OFFSET, BD_SIZE)); - - /* store key field to stash buffer */ - svst1_u32(pg32, (uint32_t *)key_field.l234_info, l234); - svst1_u32(pg32, (uint32_t *)key_field.bd_base_info, vld); - svst1_u32(pg32, (uint32_t *)key_field.ol_info, ol); - - /* sub crc_len for pkt_len and data_len */ - xlen = svreinterpret_u32_u16(svsub_n_u16_z(PG16_256BIT, - svreinterpret_u16_u32(xlen), rxq->crc_len)); - /* init mbuf_initializer */ mbuf_init = svdup_n_u64(rxq->mbuf_initializer); - - /* extract datalen, pktlen and rss from xlen and rss */ - xlen1st = svreinterpret_u64_u16( - svtbl_u16(svreinterpret_u16_u32(xlen), xlen_tbl1)); - xlen2st = svreinterpret_u64_u16( - svtbl_u16(svreinterpret_u16_u32(xlen), xlen_tbl2)); - rss1st = svreinterpret_u64_u32( - svtbl_u32(svreinterpret_u32_u32(rss), rss_tbl1)); - rss2st = svreinterpret_u64_u32( - svtbl_u32(svreinterpret_u32_u32(rss), rss_tbl2)); - /* save mbuf_initializer */ svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st, offsetof(struct rte_mbuf, rearm_data), mbuf_init); svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st, offsetof(struct rte_mbuf, rearm_data), mbuf_init); - /* save datalen and pktlen and rss */ - svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st, - offsetof(struct rte_mbuf, pkt_len), xlen1st); - svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st, - offsetof(struct rte_mbuf, hash.rss), rss1st); - svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st, - offsetof(struct rte_mbuf, pkt_len), xlen2st); - svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st, - offsetof(struct rte_mbuf, hash.rss), rss2st); - - rte_prefetch_non_temporal(rxdp + - HNS3_SVE_DEFAULT_DESCS_PER_LOOP); + next_rxdp = rxdp + HNS3_SVE_DEFAULT_DESCS_PER_LOOP; + rte_prefetch_non_temporal(next_rxdp); + rte_prefetch_non_temporal(next_rxdp + 2); + rte_prefetch_non_temporal(next_rxdp + 4); + rte_prefetch_non_temporal(next_rxdp + 6); parse_retcode = hns3_desc_parse_field_sve(rxq, &rx_pkts[pos], - &key_field, bd_valid_num); + &rxdp2[offset], bd_valid_num); if (unlikely(parse_retcode)) (*bd_err_mask) |= ((uint64_t)parse_retcode) << pos; @@ -237,54 +154,6 @@ hns3_recv_burst_vec_sve(struct hns3_rx_queue *__restrict rxq, return nb_rx; } -static inline void -hns3_rxq_rearm_mbuf_sve(struct hns3_rx_queue *rxq) -{ -#define REARM_LOOP_STEP_NUM 4 - struct hns3_entry *rxep = &rxq->sw_ring[rxq->rx_rearm_start]; - struct hns3_desc *rxdp = rxq->rx_ring + 
rxq->rx_rearm_start; - struct hns3_entry *rxep_tmp = rxep; - int i; - - if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep, - HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) { - rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; - return; - } - - for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM, - rxep_tmp += REARM_LOOP_STEP_NUM) { - svuint64_t prf = svld1_u64(PG64_256BIT, (uint64_t *)rxep_tmp); - svprfd_gather_u64base(PG64_256BIT, prf, SV_PLDL1STRM); - } - - for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM, - rxep += REARM_LOOP_STEP_NUM, rxdp += REARM_LOOP_STEP_NUM) { - uint64_t iova[REARM_LOOP_STEP_NUM]; - iova[0] = rte_mbuf_iova_get(rxep[0].mbuf); - iova[1] = rte_mbuf_iova_get(rxep[1].mbuf); - iova[2] = rte_mbuf_iova_get(rxep[2].mbuf); - iova[3] = rte_mbuf_iova_get(rxep[3].mbuf); - svuint64_t siova = svld1_u64(PG64_256BIT, iova); - siova = svadd_n_u64_z(PG64_256BIT, siova, RTE_PKTMBUF_HEADROOM); - svuint64_t ol_base = svdup_n_u64(0); - svst1_scatter_u64offset_u64(PG64_256BIT, - (uint64_t *)&rxdp[0].addr, - svindex_u64(BD_FIELD_ADDR_OFFSET, BD_SIZE), siova); - svst1_scatter_u64offset_u64(PG64_256BIT, - (uint64_t *)&rxdp[0].addr, - svindex_u64(BD_FIELD_OL_OFFSET, BD_SIZE), ol_base); - } - - rxq->rx_rearm_start += HNS3_DEFAULT_RXQ_REARM_THRESH; - if (rxq->rx_rearm_start >= rxq->nb_rx_desc) - rxq->rx_rearm_start = 0; - - rxq->rx_rearm_nb -= HNS3_DEFAULT_RXQ_REARM_THRESH; - - hns3_write_reg_opt(rxq->io_head_reg, HNS3_DEFAULT_RXQ_REARM_THRESH); -} - uint16_t hns3_recv_pkts_vec_sve(void *__restrict rx_queue, struct rte_mbuf **__restrict rx_pkts, @@ -300,7 +169,7 @@ hns3_recv_pkts_vec_sve(void *__restrict rx_queue, nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, HNS3_SVE_DEFAULT_DESCS_PER_LOOP); if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH) - hns3_rxq_rearm_mbuf_sve(rxq); + hns3_rxq_rearm_mbuf(rxq); if (unlikely(!(rxdp->rx.bd_base_info & rte_cpu_to_le_32(1u << HNS3_RXD_VLD_B)))) @@ -331,52 +200,12 @@ hns3_recv_pkts_vec_sve(void *__restrict rx_queue, break; if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH) - hns3_rxq_rearm_mbuf_sve(rxq); + hns3_rxq_rearm_mbuf(rxq); } return nb_rx; } -static inline void -hns3_tx_free_buffers_sve(struct hns3_tx_queue *txq) -{ -#define HNS3_SVE_CHECK_DESCS_PER_LOOP 8 -#define TX_VLD_U8_ZIP_INDEX svindex_u8(0, 4) - svbool_t pg32 = svwhilelt_b32(0, HNS3_SVE_CHECK_DESCS_PER_LOOP); - svuint32_t vld, vld2; - svuint8_t vld_u8; - uint64_t vld_all; - struct hns3_desc *tx_desc; - int i; - - /* - * All mbufs can be released only when the VLD bits of all - * descriptors in a batch are cleared. 
- */ - /* do logical OR operation for all desc's valid field */ - vld = svdup_n_u32(0); - tx_desc = &txq->tx_ring[txq->next_to_clean]; - for (i = 0; i < txq->tx_rs_thresh; i += HNS3_SVE_CHECK_DESCS_PER_LOOP, - tx_desc += HNS3_SVE_CHECK_DESCS_PER_LOOP) { - vld2 = svld1_gather_u32offset_u32(pg32, (uint32_t *)tx_desc, - svindex_u32(BD_FIELD_VALID_OFFSET, BD_SIZE)); - vld = svorr_u32_z(pg32, vld, vld2); - } - /* shift left and then right to get all valid bit */ - vld = svlsl_n_u32_z(pg32, vld, - HNS3_UINT32_BIT - 1 - HNS3_TXD_VLD_B); - vld = svreinterpret_u32_s32(svasr_n_s32_z(pg32, - svreinterpret_s32_u32(vld), HNS3_UINT32_BIT - 1)); - /* use tbl to compress 32bit-lane to 8bit-lane */ - vld_u8 = svtbl_u8(svreinterpret_u8_u32(vld), TX_VLD_U8_ZIP_INDEX); - /* dump compressed 64bit to variable */ - svst1_u64(PG64_64BIT, &vld_all, svreinterpret_u64_u8(vld_u8)); - if (vld_all > 0) - return; - - hns3_tx_bulk_free_buffers(txq); -} - static inline void hns3_tx_fill_hw_ring_sve(struct hns3_tx_queue *txq, struct rte_mbuf **pkts, @@ -462,7 +291,7 @@ hns3_xmit_fixed_burst_vec_sve(void *__restrict tx_queue, uint16_t nb_tx = 0; if (txq->tx_bd_ready < txq->tx_free_thresh) - hns3_tx_free_buffers_sve(txq); + hns3_tx_free_buffers(txq); nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts); if (unlikely(nb_pkts == 0)) { diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c index e1089b6bd0c..67402a700f4 100644 --- a/drivers/net/hns3/hns3_tm.c +++ b/drivers/net/hns3/hns3_tm.c @@ -1081,21 +1081,6 @@ hns3_tm_hierarchy_commit(struct rte_eth_dev *dev, return -EINVAL; } -static int -hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev, - int clear_on_fail, - struct rte_tm_error *error) -{ - struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret; - - rte_spinlock_lock(&hw->lock); - ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error); - rte_spinlock_unlock(&hw->lock); - - return ret; -} - static int hns3_tm_node_shaper_do_update(struct hns3_hw *hw, uint32_t node_id, @@ -1195,6 +1180,148 @@ hns3_tm_node_shaper_update(struct rte_eth_dev *dev, return 0; } +static int +hns3_tm_capabilities_get_wrap(struct rte_eth_dev *dev, + struct rte_tm_capabilities *cap, + struct rte_tm_error *error) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + rte_spinlock_lock(&hw->lock); + ret = hns3_tm_capabilities_get(dev, cap, error); + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_tm_shaper_profile_add_wrap(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_shaper_params *profile, + struct rte_tm_error *error) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + rte_spinlock_lock(&hw->lock); + ret = hns3_tm_shaper_profile_add(dev, shaper_profile_id, profile, error); + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_tm_shaper_profile_del_wrap(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_error *error) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + rte_spinlock_lock(&hw->lock); + ret = hns3_tm_shaper_profile_del(dev, shaper_profile_id, error); + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_tm_node_add_wrap(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t parent_node_id, uint32_t priority, + uint32_t weight, uint32_t level_id, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int 
ret; + + rte_spinlock_lock(&hw->lock); + ret = hns3_tm_node_add(dev, node_id, parent_node_id, priority, + weight, level_id, params, error); + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_tm_node_delete_wrap(struct rte_eth_dev *dev, + uint32_t node_id, + struct rte_tm_error *error) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + rte_spinlock_lock(&hw->lock); + ret = hns3_tm_node_delete(dev, node_id, error); + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_tm_node_type_get_wrap(struct rte_eth_dev *dev, + uint32_t node_id, + int *is_leaf, + struct rte_tm_error *error) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + rte_spinlock_lock(&hw->lock); + ret = hns3_tm_node_type_get(dev, node_id, is_leaf, error); + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_tm_level_capabilities_get_wrap(struct rte_eth_dev *dev, + uint32_t level_id, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + rte_spinlock_lock(&hw->lock); + ret = hns3_tm_level_capabilities_get(dev, level_id, cap, error); + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_tm_node_capabilities_get_wrap(struct rte_eth_dev *dev, + uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + rte_spinlock_lock(&hw->lock); + ret = hns3_tm_node_capabilities_get(dev, node_id, cap, error); + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev, + int clear_on_fail, + struct rte_tm_error *error) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + rte_spinlock_lock(&hw->lock); + ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error); + rte_spinlock_unlock(&hw->lock); + + return ret; +} + static int hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev, uint32_t node_id, @@ -1213,14 +1340,14 @@ hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev, } static const struct rte_tm_ops hns3_tm_ops = { - .capabilities_get = hns3_tm_capabilities_get, - .shaper_profile_add = hns3_tm_shaper_profile_add, - .shaper_profile_delete = hns3_tm_shaper_profile_del, - .node_add = hns3_tm_node_add, - .node_delete = hns3_tm_node_delete, - .node_type_get = hns3_tm_node_type_get, - .level_capabilities_get = hns3_tm_level_capabilities_get, - .node_capabilities_get = hns3_tm_node_capabilities_get, + .capabilities_get = hns3_tm_capabilities_get_wrap, + .shaper_profile_add = hns3_tm_shaper_profile_add_wrap, + .shaper_profile_delete = hns3_tm_shaper_profile_del_wrap, + .node_add = hns3_tm_node_add_wrap, + .node_delete = hns3_tm_node_delete_wrap, + .node_type_get = hns3_tm_node_type_get_wrap, + .level_capabilities_get = hns3_tm_level_capabilities_get_wrap, + .node_capabilities_get = hns3_tm_node_capabilities_get_wrap, .hierarchy_commit = hns3_tm_hierarchy_commit_wrap, .node_shaper_update = hns3_tm_node_shaper_update_wrap, }; diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c index 9eee104063d..ab655a0a72e 100644 --- a/drivers/net/i40e/base/i40e_common.c +++ b/drivers/net/i40e/base/i40e_common.c @@ -37,6 +37,7 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_10G_B: case I40E_DEV_ID_10G_SFP: case 
I40E_DEV_ID_5G_BASE_T_BC: + case I40E_DEV_ID_1G_BASE_T_BC: case I40E_DEV_ID_20G_KR2: case I40E_DEV_ID_20G_KR2_A: case I40E_DEV_ID_25G_B: @@ -54,6 +55,7 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_SFP_I_X722: + case I40E_DEV_ID_SFP_X722_A: hw->mac.type = I40E_MAC_X722; break; #if defined(INTEGRATED_VF) || defined(VF_DRIVER) @@ -6910,6 +6912,7 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_10G_BASE_T_BC: case I40E_DEV_ID_5G_BASE_T_BC: + case I40E_DEV_ID_1G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: @@ -6946,7 +6949,9 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, break; case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_10G_BASE_T_BC: case I40E_DEV_ID_5G_BASE_T_BC: + case I40E_DEV_ID_1G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h index 02ae7be550e..ee31e51f572 100644 --- a/drivers/net/i40e/base/i40e_devids.h +++ b/drivers/net/i40e/base/i40e_devids.h @@ -26,6 +26,7 @@ #define I40E_DEV_ID_XXV710_N3000 0x0D58 #define I40E_DEV_ID_10G_BASE_T_BC 0x15FF #define I40E_DEV_ID_5G_BASE_T_BC 0x101F +#define I40E_DEV_ID_1G_BASE_T_BC 0x0DD2 #if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT) #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 @@ -48,6 +49,7 @@ #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 +#define I40E_DEV_ID_SFP_X722_A 0x0DDA #if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT) #define I40E_DEV_ID_X722_VF 0x37CD #endif /* VF_DRIVER */ diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 8271bbb3944..3ca226156b1 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -439,8 +439,10 @@ static const struct rte_pci_id pci_id_i40e_map[] = { { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_BC) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722_A) }, { .vendor_id = 0, /* sentinel */ }, }; @@ -496,6 +498,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .flow_ops_get = i40e_dev_flow_ops_get, .rxq_info_get = i40e_rxq_info_get, .txq_info_get = i40e_txq_info_get, + .recycle_rxq_info_get = i40e_recycle_rxq_info_get, .rx_burst_mode_get = i40e_rx_burst_mode_get, .tx_burst_mode_get = i40e_tx_burst_mode_get, .timesync_enable = i40e_timesync_enable, @@ -6025,14 +6028,16 @@ i40e_vsi_setup(struct i40e_pf *pf, } } - /* MAC/VLAN configuration */ - rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); - filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; + if (vsi->type != I40E_VSI_FDIR) { + /* MAC/VLAN configuration for non-FDIR VSI*/ + rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); + filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; - ret = i40e_vsi_add_mac(vsi, &filter); - if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to 
add MACVLAN filter"); - goto fail_msix_alloc; + ret = i40e_vsi_add_mac(vsi, &filter); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter"); + goto fail_msix_alloc; + } } /* Get VSI BW information */ diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index 6f65d5e0acb..1bbe7ad3760 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -1355,6 +1355,8 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_rxq_info *qinfo); void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_txq_info *qinfo); +void i40e_recycle_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_recycle_rxq_info *recycle_rxq_info); int i40e_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_burst_mode *mode); int i40e_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id, @@ -1492,7 +1494,7 @@ i40e_align_floor(int n) { if (n == 0) return 0; - return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)); + return 1 << (sizeof(n) * CHAR_BIT - 1 - rte_clz32(n)); } static inline uint16_t diff --git a/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c b/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c new file mode 100644 index 00000000000..14424c9921b --- /dev/null +++ b/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2023 Arm Limited. + */ + +#include +#include + +#include "base/i40e_prototype.h" +#include "base/i40e_type.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" + +#pragma GCC diagnostic ignored "-Wcast-qual" + +void +i40e_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs) +{ + struct i40e_rx_queue *rxq = rx_queue; + struct i40e_rx_entry *rxep; + volatile union i40e_rx_desc *rxdp; + uint16_t rx_id; + uint64_t paddr; + uint64_t dma_addr; + uint16_t i; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + rxep = &rxq->sw_ring[rxq->rxrearm_start]; + + for (i = 0; i < nb_mbufs; i++) { + /* Initialize rxdp descs. */ + paddr = (rxep[i].mbuf)->buf_iova + RTE_PKTMBUF_HEADROOM; + dma_addr = rte_cpu_to_le_64(paddr); + /* flush desc with pa dma_addr */ + rxdp[i].read.hdr_addr = 0; + rxdp[i].read.pkt_addr = dma_addr; + } + + /* Update the descriptor initializer index */ + rxq->rxrearm_start += nb_mbufs; + rx_id = rxq->rxrearm_start - 1; + + if (unlikely(rxq->rxrearm_start >= rxq->nb_rx_desc)) { + rxq->rxrearm_start = 0; + rx_id = rxq->nb_rx_desc - 1; + } + + rxq->rxrearm_nb -= nb_mbufs; + + rte_io_wmb(); + /* Update the tail pointer on the NIC */ + I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id); +} + +uint16_t +i40e_recycle_tx_mbufs_reuse_vec(void *tx_queue, + struct rte_eth_recycle_rxq_info *recycle_rxq_info) +{ + struct i40e_tx_queue *txq = tx_queue; + struct i40e_tx_entry *txep; + struct rte_mbuf **rxep; + int i, n; + uint16_t nb_recycle_mbufs; + uint16_t avail = 0; + uint16_t mbuf_ring_size = recycle_rxq_info->mbuf_ring_size; + uint16_t mask = recycle_rxq_info->mbuf_ring_size - 1; + uint16_t refill_requirement = recycle_rxq_info->refill_requirement; + uint16_t refill_head = *recycle_rxq_info->refill_head; + uint16_t receive_tail = *recycle_rxq_info->receive_tail; + + /* Get available recycling Rx buffers. */ + avail = (mbuf_ring_size - (refill_head - receive_tail)) & mask; + + /* Check Tx free thresh and Rx available space. 
*/ + if (txq->nb_tx_free > txq->tx_free_thresh || avail <= txq->tx_rs_thresh) + return 0; + + /* check DD bits on threshold descriptor */ + if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & + rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) != + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) + return 0; + + n = txq->tx_rs_thresh; + nb_recycle_mbufs = n; + + /* Mbufs recycle mode can only support no ring buffer wrapping around. + * Two case for this: + * + * case 1: The refill head of Rx buffer ring needs to be aligned with + * mbuf ring size. In this case, the number of Tx freeing buffers + * should be equal to refill_requirement. + * + * case 2: The refill head of Rx ring buffer does not need to be aligned + * with mbuf ring size. In this case, the update of refill head can not + * exceed the Rx mbuf ring size. + */ + if ((refill_requirement && refill_requirement != n) || + (!refill_requirement && (refill_head + n > mbuf_ring_size))) + return 0; + + /* First buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh-1). + */ + txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)]; + rxep = recycle_rxq_info->mbuf_ring; + rxep += refill_head; + + if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) { + /* Avoid txq contains buffers from unexpected mempool. */ + if (unlikely(recycle_rxq_info->mp + != txep[0].mbuf->pool)) + return 0; + + /* Directly put mbufs from Tx to Rx. */ + for (i = 0; i < n; i++) + rxep[i] = txep[i].mbuf; + } else { + for (i = 0; i < n; i++) { + rxep[i] = rte_pktmbuf_prefree_seg(txep[i].mbuf); + + /* If Tx buffers are not the last reference or from + * unexpected mempool, previous copied buffers are + * considered as invalid. + */ + if (unlikely(rxep[i] == NULL || + recycle_rxq_info->mp != txep[i].mbuf->pool)) + nb_recycle_mbufs = 0; + } + /* If Tx buffers are not the last reference or + * from unexpected mempool, all recycled buffers + * are put into mempool. + */ + if (nb_recycle_mbufs == 0) + for (i = 0; i < n; i++) { + if (rxep[i] != NULL) + rte_mempool_put(rxep[i]->pool, rxep[i]); + } + } + + /* Update counters for Tx. 
*/ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return nb_recycle_mbufs; +} diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index b4f65b58fa9..9aa5facb530 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -1918,6 +1918,12 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev, if (use_def_burst_func) ad->rx_bulk_alloc_allowed = false; i40e_set_rx_function(dev); + + if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) { + PMD_DRV_LOG(ERR, "Failed vector rx setup."); + return -EINVAL; + } + return 0; } else if (ad->rx_vec_allowed && !rte_is_power_of_2(rxq->nb_rx_desc)) { PMD_DRV_LOG(ERR, "Vector mode is allowed, but descriptor" @@ -3199,6 +3205,30 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.offloads = txq->offloads; } +void +i40e_recycle_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_recycle_rxq_info *recycle_rxq_info) +{ + struct i40e_rx_queue *rxq; + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + rxq = dev->data->rx_queues[queue_id]; + + recycle_rxq_info->mbuf_ring = (void *)rxq->sw_ring; + recycle_rxq_info->mp = rxq->mp; + recycle_rxq_info->mbuf_ring_size = rxq->nb_rx_desc; + recycle_rxq_info->receive_tail = &rxq->rx_tail; + + if (ad->rx_vec_allowed) { + recycle_rxq_info->refill_requirement = RTE_I40E_RXQ_REARM_THRESH; + recycle_rxq_info->refill_head = &rxq->rxrearm_start; + } else { + recycle_rxq_info->refill_requirement = rxq->rx_free_thresh; + recycle_rxq_info->refill_head = &rxq->rx_free_trigger; + } +} + #ifdef RTE_ARCH_X86 static inline bool get_avx_supported(bool request_avx512) @@ -3216,15 +3246,9 @@ get_avx_supported(bool request_avx512) #endif } else { if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && - rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 && - rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) -#ifdef CC_AVX2_SUPPORT + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 && + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) return true; -#else - PMD_DRV_LOG(NOTICE, - "AVX2 is not supported in build env"); - return false; -#endif } return false; @@ -3293,6 +3317,8 @@ i40e_set_rx_function(struct rte_eth_dev *dev) dev->rx_pkt_burst = ad->rx_use_avx2 ? i40e_recv_scattered_pkts_vec_avx2 : i40e_recv_scattered_pkts_vec; + dev->recycle_rx_descriptors_refill = + i40e_recycle_rx_descriptors_refill_vec; } } else { if (ad->rx_use_avx512) { @@ -3311,9 +3337,12 @@ i40e_set_rx_function(struct rte_eth_dev *dev) dev->rx_pkt_burst = ad->rx_use_avx2 ? i40e_recv_pkts_vec_avx2 : i40e_recv_pkts_vec; + dev->recycle_rx_descriptors_refill = + i40e_recycle_rx_descriptors_refill_vec; } } #else /* RTE_ARCH_X86 */ + dev->recycle_rx_descriptors_refill = i40e_recycle_rx_descriptors_refill_vec; if (dev->data->scattered_rx) { PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx (port %d).", @@ -3481,15 +3510,18 @@ i40e_set_tx_function(struct rte_eth_dev *dev) dev->tx_pkt_burst = ad->tx_use_avx2 ? 
i40e_xmit_pkts_vec_avx2 : i40e_xmit_pkts_vec; + dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec; } #else /* RTE_ARCH_X86 */ PMD_INIT_LOG(DEBUG, "Using Vector Tx (port %d).", dev->data->port_id); dev->tx_pkt_burst = i40e_xmit_pkts_vec; + dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec; #endif /* RTE_ARCH_X86 */ } else { PMD_INIT_LOG(DEBUG, "Simple tx finally be used."); dev->tx_pkt_burst = i40e_xmit_pkts_simple; + dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec; } dev->tx_pkt_prepare = i40e_simple_prep_pkts; } else { @@ -3608,7 +3640,7 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev) } } -#ifndef CC_AVX2_SUPPORT +#ifndef RTE_ARCH_X86 uint16_t i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue, struct rte_mbuf __rte_unused **rx_pkts, @@ -3632,4 +3664,4 @@ i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue, { return 0; } -#endif /* ifndef CC_AVX2_SUPPORT */ +#endif /* ifndef RTE_ARCH_X86 */ diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index a8686224e5c..b191f23e1fb 100644 --- a/drivers/net/i40e/i40e_rxtx.h +++ b/drivers/net/i40e/i40e_rxtx.h @@ -236,6 +236,10 @@ uint32_t i40e_dev_rx_queue_count(void *rx_queue); int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset); int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset); +uint16_t i40e_recycle_tx_mbufs_reuse_vec(void *tx_queue, + struct rte_eth_recycle_rxq_info *recycle_rxq_info); +void i40e_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs); + uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue, diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c index 4cd78f4e584..b6b0d38ec19 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c +++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c @@ -432,7 +432,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, desc_to_olflags_v(descs, &rx_pkts[pos]); /* C.4 calc available number of desc */ - var = __builtin_popcountll((vec_ld(0, + var = rte_popcount64((vec_ld(0, (__vector unsigned long *)&staterr)[0])); nb_pkts_recd += var; if (likely(var != RTE_I40E_DESCS_PER_LOOP)) diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c index 761edb9d20b..f468c1fd903 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_avx2.c +++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c @@ -605,9 +605,9 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, status0_7 = _mm256_packs_epi32(status0_7, _mm256_setzero_si256()); - uint64_t burst = __builtin_popcountll(_mm_cvtsi128_si64( + uint64_t burst = rte_popcount64(_mm_cvtsi128_si64( _mm256_extracti128_si256(status0_7, 1))); - burst += __builtin_popcountll(_mm_cvtsi128_si64( + burst += rte_popcount64(_mm_cvtsi128_si64( _mm256_castsi256_si128(status0_7))); received += burst; if (burst != RTE_I40E_DESCS_PER_LOOP_AVX) diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c index ad0893324d5..f3050cd06ca 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c +++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c @@ -654,11 +654,11 @@ _recv_raw_pkts_vec_avx512(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, status0_7 = _mm256_packs_epi32 (status0_7, _mm256_setzero_si256()); - uint64_t burst = __builtin_popcountll + uint64_t burst = rte_popcount64 (_mm_cvtsi128_si64 (_mm256_extracti128_si256 (status0_7, 1))); - burst += 
__builtin_popcountll(_mm_cvtsi128_si64 + burst += rte_popcount64(_mm_cvtsi128_si64 (_mm256_castsi256_si128(status0_7))); received += burst; if (burst != RTE_I40E_DESCS_PER_LOOP_AVX) diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h index fe1a6ec75ef..8b745630e4c 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_common.h +++ b/drivers/net/i40e/i40e_rxtx_vec_common.h @@ -201,6 +201,7 @@ i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq) rte_compiler_barrier(); p = (uintptr_t)&mb_def.rearm_data; rxq->mbuf_initializer = *(uint64_t *)p; + rxq->rx_using_sse = 1; return 0; } diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c index b94c37cbb82..9200a23ff66 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_sse.c +++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c @@ -579,7 +579,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, pkt_mb1); desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl); /* C.4 calc available number of desc */ - var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + var = rte_popcount64(_mm_cvtsi128_si64(staterr)); nb_pkts_recd += var; if (likely(var != RTE_I40E_DESCS_PER_LOOP)) break; diff --git a/drivers/net/i40e/meson.build b/drivers/net/i40e/meson.build index 8e53b87a65e..80171b9dc6b 100644 --- a/drivers/net/i40e/meson.build +++ b/drivers/net/i40e/meson.build @@ -34,6 +34,7 @@ sources = files( 'i40e_tm.c', 'i40e_hash.c', 'i40e_vf_representor.c', + 'i40e_recycle_mbufs_vec_common.c', 'rte_pmd_i40e.c', ) @@ -49,22 +50,12 @@ if arch_subdir == 'x86' cflags += ['-fno-asynchronous-unwind-tables'] endif - # compile AVX2 version if either: - # a. we have AVX supported in minimum instruction set baseline - # b. it's not minimum instruction set, but supported by compiler - if cc.get_define('__AVX2__', args: machine_args) != '' - cflags += ['-DCC_AVX2_SUPPORT'] - sources += files('i40e_rxtx_vec_avx2.c') - elif cc.has_argument('-mavx2') - cflags += ['-DCC_AVX2_SUPPORT'] - i40e_avx2_lib = static_library('i40e_avx2_lib', - 'i40e_rxtx_vec_avx2.c', - dependencies: [static_rte_ethdev, - static_rte_kvargs, static_rte_hash], - include_directories: includes, - c_args: [cflags, '-mavx2']) - objs += i40e_avx2_lib.extract_objects('i40e_rxtx_vec_avx2.c') - endif + i40e_avx2_lib = static_library('i40e_avx2_lib', + 'i40e_rxtx_vec_avx2.c', + dependencies: [static_rte_ethdev, static_rte_kvargs, static_rte_hash], + include_directories: includes, + c_args: [cflags, '-mavx2']) + objs += i40e_avx2_lib.extract_objects('i40e_rxtx_vec_avx2.c') i40e_avx512_cpu_support = ( cc.get_define('__AVX512F__', args: machine_args) != '' and diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index 98861e42429..04774ce1240 100644 --- a/drivers/net/iavf/iavf.h +++ b/drivers/net/iavf/iavf.h @@ -277,6 +277,8 @@ struct iavf_info { struct rte_eth_dev *eth_dev; + bool in_reset_recovery; + uint32_t ptp_caps; rte_spinlock_t phc_time_aq_lock; }; @@ -305,6 +307,7 @@ struct iavf_devargs { uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM]; uint16_t quanta_size; uint32_t watchdog_period; + uint8_t auto_reset; }; struct iavf_security_ctx; @@ -426,6 +429,9 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops) } int iavf_check_api_version(struct iavf_adapter *adapter); int iavf_get_vf_resource(struct iavf_adapter *adapter); +void iavf_dev_event_post(struct rte_eth_dev *dev, + enum rte_eth_event_type event, + void *param, size_t param_alloc_size); void iavf_dev_event_handler_fini(void); int iavf_dev_event_handler_init(void); 
void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev); @@ -501,4 +507,5 @@ int iavf_flow_sub_check(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter); void iavf_dev_watchdog_enable(struct iavf_adapter *adapter); void iavf_dev_watchdog_disable(struct iavf_adapter *adapter); +int iavf_handle_hw_reset(struct rte_eth_dev *dev); #endif /* _IAVF_ETHDEV_H_ */ diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index f2fc5a56216..5b2634a4e36 100644 --- a/drivers/net/iavf/iavf_ethdev.c +++ b/drivers/net/iavf/iavf_ethdev.c @@ -37,6 +37,7 @@ #define IAVF_PROTO_XTR_ARG "proto_xtr" #define IAVF_QUANTA_SIZE_ARG "quanta_size" #define IAVF_RESET_WATCHDOG_ARG "watchdog_period" +#define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset" uint64_t iavf_timestamp_dynflag; int iavf_timestamp_dynfield_offset = -1; @@ -45,6 +46,7 @@ static const char * const iavf_valid_args[] = { IAVF_PROTO_XTR_ARG, IAVF_QUANTA_SIZE_ARG, IAVF_RESET_WATCHDOG_ARG, + IAVF_ENABLE_AUTO_RESET_ARG, NULL }; @@ -133,6 +135,8 @@ static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); +static void iavf_dev_interrupt_handler(void *param); +static void iavf_disable_irq0(struct iavf_hw *hw); static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops); static int iavf_set_mc_addr_list(struct rte_eth_dev *dev, @@ -305,8 +309,8 @@ iavf_dev_watchdog(void *cb_arg) adapter->vf.vf_reset = true; adapter->vf.link_up = false; - rte_eth_dev_callback_process(adapter->vf.eth_dev, - RTE_ETH_EVENT_INTR_RESET, NULL); + iavf_dev_event_post(adapter->vf.eth_dev, RTE_ETH_EVENT_INTR_RESET, + NULL, 0); } } @@ -324,24 +328,31 @@ iavf_dev_watchdog(void *cb_arg) void iavf_dev_watchdog_enable(struct iavf_adapter *adapter) { - if (adapter->devargs.watchdog_period && !adapter->vf.watchdog_enabled) { - PMD_DRV_LOG(INFO, "Enabling device watchdog, period is %dμs", - adapter->devargs.watchdog_period); - adapter->vf.watchdog_enabled = true; - if (rte_eal_alarm_set(adapter->devargs.watchdog_period, - &iavf_dev_watchdog, (void *)adapter)) - PMD_DRV_LOG(ERR, "Failed to enabled device watchdog"); - } else { + if (!adapter->devargs.watchdog_period) { PMD_DRV_LOG(INFO, "Device watchdog is disabled"); + } else { + if (!adapter->vf.watchdog_enabled) { + PMD_DRV_LOG(INFO, "Enabling device watchdog, period is %dμs", + adapter->devargs.watchdog_period); + adapter->vf.watchdog_enabled = true; + if (rte_eal_alarm_set(adapter->devargs.watchdog_period, + &iavf_dev_watchdog, (void *)adapter)) + PMD_DRV_LOG(ERR, "Failed to enable device watchdog"); + } } } void iavf_dev_watchdog_disable(struct iavf_adapter *adapter) { - if (adapter->devargs.watchdog_period && adapter->vf.watchdog_enabled) { - PMD_DRV_LOG(INFO, "Disabling device watchdog"); - adapter->vf.watchdog_enabled = false; + if (!adapter->devargs.watchdog_period) { + PMD_DRV_LOG(INFO, "Device watchdog is not enabled"); + } else { + if (adapter->vf.watchdog_enabled) { + PMD_DRV_LOG(INFO, "Disabling device watchdog"); + adapter->vf.watchdog_enabled = false; + rte_eal_alarm_cancel(&iavf_dev_watchdog, (void *)adapter); + } } } @@ -1092,12 +1103,15 @@ iavf_dev_stop(struct rte_eth_dev *dev) /* Rx interrupt vector mapping free */ rte_intr_vec_list_free(intr_handle); - /* remove all mac addrs */ - iavf_add_del_all_mac_addr(adapter, false); + /* adminq will be disabled when vf is resetting. 
*/ + if (!vf->in_reset_recovery) { + /* remove all mac addrs */ + iavf_add_del_all_mac_addr(adapter, false); - /* remove all multicast addresses */ - iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num, + /* remove all multicast addresses */ + iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num, false); + } iavf_stop_queues(dev); @@ -1127,7 +1141,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->reta_size = vf->vf_res->rss_lut_size; dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL; dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX; - dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; + dev_info->dev_capa = + RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP | RTE_ETH_RX_OFFLOAD_QINQ_STRIP | @@ -1362,6 +1378,7 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; int err; if (adapter->closed) @@ -1380,6 +1397,23 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) err = iavf_add_del_vlan(adapter, vlan_id, on); if (err) return -EIO; + + /* For i40e kernel driver which only supports vlan(v1) VIRTCHNL OP, + * it will set strip on when setting filter on but dpdk side will not + * change strip flag. To be consistent with dpdk side, disable strip + * again. + * + * For i40e kernel driver which supports vlan v2, dpdk will invoke vlan v2 + * related function, so it won't go through here. + */ + if (adapter->hw.mac.type == IAVF_MAC_XL710 || + adapter->hw.mac.type == IAVF_MAC_X722_VF) { + if (on && !(dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) { + err = iavf_disable_vlan_strip(adapter); + if (err) + return -EIO; + } + } return 0; } @@ -2210,6 +2244,26 @@ parse_u16(__rte_unused const char *key, const char *value, void *args) return 0; } +static int +parse_bool(const char *key, const char *value, void *args) +{ + int *i = (int *)args; + char *end; + int num; + + num = strtoul(value, &end, 10); + + if (num != 0 && num != 1) { + PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", " + "value must be 0 or 1", + value, key); + return -1; + } + + *i = num; + return 0; +} + static int iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void *args) { @@ -2278,6 +2332,11 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev) goto bail; } + ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG, + &parse_bool, &ad->devargs.auto_reset); + if (ret) + goto bail; + bail: rte_kvargs_free(kvlist); return ret; @@ -2709,18 +2768,19 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) ret = iavf_security_ctx_create(adapter); if (ret) { PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance"); - return ret; + goto flow_init_err; } ret = iavf_security_init(adapter); if (ret) { PMD_INIT_LOG(ERR, "failed to initialized ipsec crypto resources"); - return ret; + goto security_init_err; } } iavf_default_rss_disable(adapter); + iavf_dev_stats_reset(eth_dev); /* Start device watchdog */ iavf_dev_watchdog_enable(adapter); @@ -2728,7 +2788,23 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) return 0; +security_init_err: + iavf_security_ctx_destroy(adapter); + flow_init_err: + iavf_disable_irq0(hw); + + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 
{ + /* disable uio intr before callback unregister */ + rte_intr_disable(pci_dev->intr_handle); + + /* unregister callback func from eal lib */ + rte_intr_callback_unregister(pci_dev->intr_handle, + iavf_dev_interrupt_handler, eth_dev); + } else { + rte_eal_alarm_cancel(iavf_dev_alarm_handler, eth_dev); + } + rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; @@ -2841,12 +2917,15 @@ iavf_dev_close(struct rte_eth_dev *dev) static int iavf_dev_uninit(struct rte_eth_dev *dev) { + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) return -EPERM; iavf_dev_close(dev); - iavf_dev_event_handler_fini(); + if (!vf->in_reset_recovery) + iavf_dev_event_handler_fini(); return 0; } @@ -2859,6 +2938,7 @@ iavf_dev_reset(struct rte_eth_dev *dev) { int ret; struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); /* * Check whether the VF reset has been done and inform application, @@ -2870,6 +2950,7 @@ iavf_dev_reset(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "Wait too long for reset done!\n"); return ret; } + vf->vf_reset = false; PMD_DRV_LOG(DEBUG, "Start dev_reset ...\n"); ret = iavf_dev_uninit(dev); @@ -2879,6 +2960,43 @@ iavf_dev_reset(struct rte_eth_dev *dev) return iavf_dev_init(dev); } +/* + * Handle hardware reset + */ +int +iavf_handle_hw_reset(struct rte_eth_dev *dev) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int ret; + + vf->in_reset_recovery = true; + + ret = iavf_dev_reset(dev); + if (ret) + goto error; + + /* VF states restore */ + ret = iavf_dev_configure(dev); + if (ret) + goto error; + + iavf_dev_xstats_reset(dev); + + /* start the device */ + ret = iavf_dev_start(dev); + if (ret) + goto error; + dev->data->dev_started = 1; + + vf->in_reset_recovery = false; + return 0; + +error: + PMD_DRV_LOG(DEBUG, "RESET recover with error code=%d\n", ret); + vf->in_reset_recovery = false; + return ret; +} + static int iavf_dcf_cap_check_handler(__rte_unused const char *key, const char *value, __rte_unused void *opaque) diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c index cf4d6771019..217f0500aba 100644 --- a/drivers/net/iavf/iavf_hash.c +++ b/drivers/net/iavf/iavf_hash.c @@ -1398,7 +1398,7 @@ iavf_any_invalid_rss_type(enum rte_eth_hash_function rss_func, /* check invalid combination */ for (i = 0; i < RTE_DIM(invalid_rss_comb); i++) { - if (__builtin_popcountll(rss_type & invalid_rss_comb[i]) > 1) + if (rte_popcount64(rss_type & invalid_rss_comb[i]) > 1) return true; } diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c index 408b410f15c..07a69db5402 100644 --- a/drivers/net/iavf/iavf_ipsec_crypto.c +++ b/drivers/net/iavf/iavf_ipsec_crypto.c @@ -828,6 +828,7 @@ iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter, /* set request params */ request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx; request->ipsec_data.sa_update->esn_hi = sess->esn.hi; + request->ipsec_data.sa_update->esn_low = sess->esn.low; /* send virtual channel request to add SA to hardware database */ rc = iavf_ipsec_crypto_request(adapter, diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index f7df4665d1b..c6ef6af1d8b 100644 --- a/drivers/net/iavf/iavf_rxtx.c +++ b/drivers/net/iavf/iavf_rxtx.c @@ -755,6 +755,13 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, if (check_rx_vec_allow(rxq) == false) 
ad->rx_vec_allowed = false; +#if defined RTE_ARCH_X86 || defined RTE_ARCH_ARM + /* check vector conflict */ + if (ad->rx_vec_allowed && iavf_rxq_vec_setup(rxq)) { + PMD_DRV_LOG(ERR, "Failed vector rx setup."); + return -EINVAL; + } +#endif return 0; } @@ -1094,15 +1101,44 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) rte_free(q); } +static void +iavf_reset_queues(struct rte_eth_dev *dev) +{ + struct iavf_rx_queue *rxq; + struct iavf_tx_queue *txq; + int i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (!txq) + continue; + iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq); + reset_tx_queue(txq); + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (!rxq) + continue; + iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq); + reset_rx_queue(rxq); + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } +} + void iavf_stop_queues(struct rte_eth_dev *dev) { struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - struct iavf_rx_queue *rxq; - struct iavf_tx_queue *txq; - int ret, i; + int ret; + + /* adminq will be disabled when vf is resetting. */ + if (vf->in_reset_recovery) { + iavf_reset_queues(dev); + return; + } /* Stop All queues */ if (!vf->lv_enabled) { @@ -1118,22 +1154,7 @@ iavf_stop_queues(struct rte_eth_dev *dev) if (ret) PMD_DRV_LOG(WARNING, "Fail to stop queues"); - for (i = 0; i < dev->data->nb_tx_queues; i++) { - txq = dev->data->tx_queues[i]; - if (!txq) - continue; - iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq); - reset_tx_queue(txq); - dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; - } - for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - if (!rxq) - continue; - iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq); - reset_rx_queue(rxq); - dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; - } + iavf_reset_queues(dev); } #define IAVF_RX_FLEX_ERR0_BITS \ @@ -2529,7 +2550,7 @@ iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field, total_length -= m->outer_l3_len + m->outer_l2_len; } -#ifdef RTE_LIBRTE_IAVF_DEBUG_TX +#ifdef RTE_ETHDEV_DEBUG_TX if (!m->l4_len || !m->tso_segsz) PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d", m->l4_len, m->tso_segsz); @@ -2643,6 +2664,9 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1, l2tag1 |= m->vlan_tci; } + if ((m->ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK) == 0) + goto skip_cksum; + /* Set MACLEN */ if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK && !(m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) @@ -2702,6 +2726,7 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1, break; } +skip_cksum: *qw1 = rte_cpu_to_le_64((((uint64_t)command << IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) | (((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) & @@ -3611,7 +3636,6 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, struct rte_mbuf *m; struct iavf_tx_queue *txq = tx_queue; struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id]; - uint16_t max_frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD; struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); @@ -3640,11 +3664,8 @@ iavf_prep_pkts(__rte_unused 
void *tx_queue, struct rte_mbuf **tx_pkts, return i; } - /* check the data_len in mbuf */ - if (m->data_len < IAVF_TX_MIN_PKT_LEN || - m->data_len > max_frame_size) { + if (m->pkt_len < IAVF_TX_MIN_PKT_LEN) { rte_errno = EINVAL; - PMD_DRV_LOG(ERR, "INVALID mbuf: bad data_len=[%hu]", m->data_len); return i; } diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c index c10f24036ec..510b4d8f1c7 100644 --- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c +++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c @@ -480,11 +480,11 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq, status0_7 = _mm256_packs_epi32(status0_7, _mm256_setzero_si256()); - uint64_t burst = __builtin_popcountll + uint64_t burst = rte_popcount64 (_mm_cvtsi128_si64 (_mm256_extracti128_si256 (status0_7, 1))); - burst += __builtin_popcountll + burst += rte_popcount64 (_mm_cvtsi128_si64 (_mm256_castsi256_si128(status0_7))); received += burst; @@ -1388,11 +1388,11 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, status0_7 = _mm256_packs_epi32(status0_7, _mm256_setzero_si256()); - uint64_t burst = __builtin_popcountll + uint64_t burst = rte_popcount64 (_mm_cvtsi128_si64 (_mm256_extracti128_si256 (status0_7, 1))); - burst += __builtin_popcountll + burst += rte_popcount64 (_mm_cvtsi128_si64 (_mm256_castsi256_si128(status0_7))); received += burst; diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c index 3e66df5341c..7a7df6d2582 100644 --- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c +++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c @@ -539,11 +539,11 @@ _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq, status0_7 = _mm256_packs_epi32(status0_7, _mm256_setzero_si256()); - uint64_t burst = __builtin_popcountll + uint64_t burst = rte_popcount64 (_mm_cvtsi128_si64 (_mm256_extracti128_si256 (status0_7, 1))); - burst += __builtin_popcountll + burst += rte_popcount64 (_mm_cvtsi128_si64 (_mm256_castsi256_si128(status0_7))); received += burst; @@ -1544,11 +1544,11 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq, status0_7 = _mm256_packs_epi32(status0_7, _mm256_setzero_si256()); - uint64_t burst = __builtin_popcountll + uint64_t burst = rte_popcount64 (_mm_cvtsi128_si64 (_mm256_extracti128_si256 (status0_7, 1))); - burst += __builtin_popcountll + burst += rte_popcount64 (_mm_cvtsi128_si64 (_mm256_castsi256_si128(status0_7))); received += burst; @@ -2460,20 +2460,19 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq) { unsigned int i; const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1); + const uint16_t end_desc = txq->tx_tail >> txq->use_ctx; /* next empty slot */ + const uint16_t wrap_point = txq->nb_tx_desc >> txq->use_ctx; /* end of SW ring */ struct iavf_tx_vec_entry *swr = (void *)txq->sw_ring; if (!txq->sw_ring || txq->nb_free == max_desc) return; - i = (txq->next_dd >> txq->use_ctx) + 1 - - (txq->rs_thresh >> txq->use_ctx); - - if (txq->tx_tail < i) { - for (; i < (unsigned int)(txq->nb_tx_desc >> txq->use_ctx); i++) { - rte_pktmbuf_free_seg(swr[i].mbuf); - swr[i].mbuf = NULL; - } - i = 0; + i = (txq->next_dd - txq->rs_thresh + 1) >> txq->use_ctx; + while (i != end_desc) { + rte_pktmbuf_free_seg(swr[i].mbuf); + swr[i].mbuf = NULL; + if (++i == wrap_point) + i = 0; } } diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h index ddb13ce8c36..e18cdc3f11c 100644 --- a/drivers/net/iavf/iavf_rxtx_vec_common.h +++ b/drivers/net/iavf/iavf_rxtx_vec_common.h @@ -186,12 +186,11 @@ 
_iavf_tx_queue_release_mbufs_vec(struct iavf_tx_queue *txq) return; i = txq->next_dd - txq->rs_thresh + 1; - if (txq->tx_tail < i) { - for (; i < txq->nb_tx_desc; i++) { - rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); - txq->sw_ring[i].mbuf = NULL; - } - i = 0; + while (i != txq->tx_tail) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + if (++i == txq->nb_tx_desc) + i = 0; } } @@ -396,7 +395,7 @@ iavf_txd_enable_offload(__rte_unused struct rte_mbuf *tx_pkt, *txd_hi |= ((uint64_t)td_cmd) << IAVF_TXD_QW1_CMD_SHIFT; } -#ifdef CC_AVX2_SUPPORT +#ifdef RTE_ARCH_X86 static __rte_always_inline void iavf_rxq_rearm_common(struct iavf_rx_queue *rxq, __rte_unused bool avx512) { diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c index 892bfa4cf33..96f187f511e 100644 --- a/drivers/net/iavf/iavf_rxtx_vec_sse.c +++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c @@ -695,7 +695,7 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, pkt_mb1); desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl); /* C.4 calc available number of desc */ - var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + var = rte_popcount64(_mm_cvtsi128_si64(staterr)); nb_pkts_recd += var; if (likely(var != IAVF_VPMD_DESCS_PER_LOOP)) break; @@ -1122,7 +1122,7 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq, pkt_mb0); flex_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl); /* C.4 calc available number of desc */ - var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + var = rte_popcount64(_mm_cvtsi128_si64(staterr)); nb_pkts_recd += var; #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index 524732f67d3..7f49eb2c1e9 100644 --- a/drivers/net/iavf/iavf_vchnl.c +++ b/drivers/net/iavf/iavf_vchnl.c @@ -79,6 +79,15 @@ iavf_dev_event_handle(void *param __rte_unused) struct iavf_event_element *pos, *save_next; TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) { TAILQ_REMOVE(&pending, pos, next); + + struct iavf_adapter *adapter = pos->dev->data->dev_private; + if (pos->event == RTE_ETH_EVENT_INTR_RESET && + adapter->devargs.auto_reset) { + iavf_handle_hw_reset(pos->dev); + rte_free(pos); + continue; + } + rte_eth_dev_callback_process(pos->dev, pos->event, pos->param); rte_free(pos); } @@ -87,7 +96,7 @@ iavf_dev_event_handle(void *param __rte_unused) return 0; } -static void +void iavf_dev_event_post(struct rte_eth_dev *dev, enum rte_eth_event_type event, void *param, size_t param_alloc_size) @@ -135,8 +144,8 @@ iavf_dev_event_handler_init(void) TAILQ_INIT(&handler->pending); pthread_mutex_init(&handler->lock, NULL); - if (rte_thread_create_control(&handler->tid, "iavf-event-thread", - NULL, iavf_dev_event_handle, NULL)) { + if (rte_thread_create_internal_control(&handler->tid, "iavf-event", + iavf_dev_event_handle, NULL)) { __atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED); return -1; } diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build index fc09ffa2ae1..a6ce2725c3b 100644 --- a/drivers/net/iavf/meson.build +++ b/drivers/net/iavf/meson.build @@ -29,22 +29,12 @@ if arch_subdir == 'x86' cflags += ['-fno-asynchronous-unwind-tables'] endif - # compile AVX2 version if either: - # a. we have AVX supported in minimum instruction set baseline - # b. 
it's not minimum instruction set, but supported by compiler - if cc.get_define('__AVX2__', args: machine_args) != '' - cflags += ['-DCC_AVX2_SUPPORT'] - sources += files('iavf_rxtx_vec_avx2.c') - elif cc.has_argument('-mavx2') - cflags += ['-DCC_AVX2_SUPPORT'] - iavf_avx2_lib = static_library('iavf_avx2_lib', - 'iavf_rxtx_vec_avx2.c', - dependencies: [static_rte_ethdev, - static_rte_kvargs, static_rte_hash], - include_directories: includes, - c_args: [cflags, '-mavx2']) - objs += iavf_avx2_lib.extract_objects('iavf_rxtx_vec_avx2.c') - endif + iavf_avx2_lib = static_library('iavf_avx2_lib', + 'iavf_rxtx_vec_avx2.c', + dependencies: [static_rte_ethdev, static_rte_kvargs, static_rte_hash], + include_directories: includes, + c_args: [cflags, '-mavx2']) + objs += iavf_avx2_lib.extract_objects('iavf_rxtx_vec_avx2.c') iavf_avx512_cpu_support = ( cc.get_define('__AVX512F__', args: machine_args) != '' and diff --git a/drivers/net/ice/ice_acl_filter.c b/drivers/net/ice/ice_acl_filter.c index f2ddbd7b9b2..63a525b3631 100644 --- a/drivers/net/ice/ice_acl_filter.c +++ b/drivers/net/ice/ice_acl_filter.c @@ -41,15 +41,13 @@ ICE_ACL_INSET_ETH_IPV4 | \ ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT) -static struct ice_flow_parser ice_acl_parser; - struct acl_rule { enum ice_fltr_ptype flow_type; uint64_t entry_id[4]; }; static struct -ice_pattern_match_item ice_acl_pattern[] = { +ice_pattern_match_item ice_acl_supported_pattern[] = { {pattern_eth_ipv4, ICE_ACL_INSET_ETH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv4_udp, ICE_ACL_INSET_ETH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv4_tcp, ICE_ACL_INSET_ETH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, @@ -993,10 +991,6 @@ ice_acl_init(struct ice_adapter *ad) int ret = 0; struct ice_pf *pf = &ad->pf; struct ice_hw *hw = ICE_PF_TO_HW(pf); - struct ice_flow_parser *parser = &ice_acl_parser; - - if (!ad->hw.dcf_enabled) - return 0; ret = ice_acl_prof_alloc(hw); if (ret) { @@ -1013,11 +1007,7 @@ ice_acl_init(struct ice_adapter *ad) if (ret) return ret; - ret = ice_acl_prof_init(pf); - if (ret) - return ret; - - return ice_register_parser(parser, ad); + return ice_acl_prof_init(pf); } static void @@ -1040,10 +1030,8 @@ ice_acl_uninit(struct ice_adapter *ad) { struct ice_pf *pf = &ad->pf; struct ice_hw *hw = ICE_PF_TO_HW(pf); - struct ice_flow_parser *parser = &ice_acl_parser; if (ad->hw.dcf_enabled) { - ice_unregister_parser(parser, ad); ice_deinit_acl(pf); ice_acl_prof_free(hw); } @@ -1059,11 +1047,11 @@ ice_flow_engine ice_acl_engine = { .type = ICE_FLOW_ENGINE_ACL, }; -static struct +struct ice_flow_parser ice_acl_parser = { .engine = &ice_acl_engine, - .array = ice_acl_pattern, - .array_len = RTE_DIM(ice_acl_pattern), + .array = ice_acl_supported_pattern, + .array_len = RTE_DIM(ice_acl_supported_pattern), .parse_pattern_action = ice_acl_parse, .stage = ICE_FLOW_STAGE_DISTRIBUTOR, }; diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index 30ad18d8fc2..065ec728c2d 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -670,7 +670,6 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev) struct ice_dcf_adapter *dcf_ad = dev->data->dev_private; struct rte_intr_handle *intr_handle = dev->intr_handle; struct ice_adapter *ad = &dcf_ad->parent; - struct ice_dcf_hw *hw = &dcf_ad->real_hw; if (ad->pf.adapter_stopped == 1) { PMD_DRV_LOG(DEBUG, "Port is already stopped"); @@ -697,7 +696,6 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev) dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; 
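The new iavf "auto_reset" devarg introduced above is parsed by parse_bool() and accepts only 0 or 1; when set, the event handler recovers from a VF reset inside the PMD via iavf_handle_hw_reset() instead of only forwarding RTE_ETH_EVENT_INTR_RESET to the application. A usage sketch, where the PCI address is a placeholder and rte_dev_probe() stands in for the equivalent EAL -a option:

#include <rte_dev.h>

/* Hot-plug an iavf VF with in-PMD reset recovery enabled; equivalent to
 * launching the application with: -a 0000:17:01.0,auto_reset=1
 */
static int
attach_vf_with_auto_reset(void)
{
	return rte_dev_probe("0000:17:01.0,auto_reset=1");
}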
ad->pf.adapter_stopped = 1; - hw->tm_conf.committed = false; return 0; } diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c index d62837840d9..6e845f458ac 100644 --- a/drivers/net/ice/ice_dcf_parent.c +++ b/drivers/net/ice/ice_dcf_parent.c @@ -165,9 +165,8 @@ ice_dcf_vsi_update_service_handler(void *param) static void start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id) { -#define THREAD_NAME_LEN 16 struct ice_dcf_reset_event_param *param; - char name[THREAD_NAME_LEN]; + char name[RTE_THREAD_INTERNAL_NAME_SIZE]; rte_thread_t thread; int ret; @@ -181,8 +180,8 @@ start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id) param->vfr = vfr; param->vf_id = vf_id; - snprintf(name, sizeof(name), "ice-reset-%u", vf_id); - ret = rte_thread_create_control(&thread, name, NULL, + snprintf(name, sizeof(name), "ice-rst%u", vf_id); + ret = rte_thread_create_internal_control(&thread, name, ice_dcf_vsi_update_service_handler, param); if (ret != 0) { PMD_DRV_LOG(ERR, "Failed to start the thread for reset handling"); @@ -475,6 +474,9 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev) if (ice_devargs_check(eth_dev->device->devargs, ICE_DCF_DEVARG_ACL)) parent_adapter->disabled_engine_mask |= BIT(ICE_FLOW_ENGINE_ACL); + parent_adapter->disabled_engine_mask |= BIT(ICE_FLOW_ENGINE_FDIR); + parent_adapter->disabled_engine_mask |= BIT(ICE_FLOW_ENGINE_HASH); + err = ice_flow_init(parent_adapter); if (err) { PMD_INIT_LOG(ERR, "Failed to initialize flow"); diff --git a/drivers/net/ice/ice_dcf_sched.c b/drivers/net/ice/ice_dcf_sched.c index a231c1e60b2..b08bc5f1dee 100644 --- a/drivers/net/ice/ice_dcf_sched.c +++ b/drivers/net/ice/ice_dcf_sched.c @@ -237,6 +237,7 @@ ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t node_id, enum ice_dcf_tm_node_type node_type = ICE_DCF_TM_NODE_TYPE_MAX; struct ice_dcf_tm_shaper_profile *shaper_profile = NULL; struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_adapter *ad = &adapter->parent; struct ice_dcf_hw *hw = &adapter->real_hw; struct ice_dcf_tm_node *parent_node; struct ice_dcf_tm_node *tm_node; @@ -246,10 +247,10 @@ ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t node_id, if (!params || !error) return -EINVAL; - /* if already committed */ - if (hw->tm_conf.committed) { + /* if port is running */ + if (!ad->pf.adapter_stopped) { error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; - error->message = "already committed"; + error->message = "port is running"; return -EINVAL; } @@ -400,16 +401,17 @@ ice_dcf_node_delete(struct rte_eth_dev *dev, uint32_t node_id, { enum ice_dcf_tm_node_type node_type = ICE_DCF_TM_NODE_TYPE_MAX; struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_adapter *ad = &adapter->parent; struct ice_dcf_hw *hw = &adapter->real_hw; struct ice_dcf_tm_node *tm_node; if (!error) return -EINVAL; - /* if already committed */ - if (hw->tm_conf.committed) { + /* if port is running */ + if (!ad->pf.adapter_stopped) { error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; - error->message = "already committed"; + error->message = "port is running"; return -EINVAL; } diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index 4bad39c2c1c..305077e74e9 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -27,7 +27,6 @@ /* devargs */ #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support" -#define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support" #define ICE_DEFAULT_MAC_DISABLE "default-mac-disable" #define ICE_PROTO_XTR_ARG 
"proto_xtr" #define ICE_FIELD_OFFS_ARG "field_offs" @@ -43,7 +42,6 @@ int ice_timestamp_dynfield_offset = -1; static const char * const ice_valid_args[] = { ICE_SAFE_MODE_SUPPORT_ARG, - ICE_PIPELINE_MODE_SUPPORT_ARG, ICE_PROTO_XTR_ARG, ICE_FIELD_OFFS_ARG, ICE_FIELD_NAME_ARG, @@ -2103,11 +2101,6 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) if (ret) goto bail; - ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG, - &parse_bool, &ad->devargs.pipe_mode_support); - if (ret) - goto bail; - ret = rte_kvargs_process(kvlist, ICE_DEFAULT_MAC_DISABLE, &parse_bool, &ad->devargs.default_mac_disable); if (ret) @@ -2449,6 +2442,7 @@ ice_dev_init(struct rte_eth_dev *dev) } if (!ad->is_safe_mode) { + ad->disabled_engine_mask |= BIT(ICE_FLOW_ENGINE_ACL); ret = ice_flow_init(ad); if (ret) { PMD_INIT_LOG(ERR, "Failed to initialize flow"); @@ -3646,6 +3640,8 @@ ice_get_init_link_status(struct rte_eth_dev *dev) if (link_status.link_info & ICE_AQ_LINK_UP) pf->init_link_up = true; + else + pf->init_link_up = false; } static int @@ -6549,7 +6545,6 @@ RTE_PMD_REGISTER_PARAM_STRING(net_ice, ICE_HW_DEBUG_MASK_ARG "=0xXXX" ICE_PROTO_XTR_ARG "=[queue:]" ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>" - ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>" ICE_DEFAULT_MAC_DISABLE "=<0|1>" ICE_RX_LOW_LATENCY_ARG "=<0|1>"); diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index f925231f340..abe6dcdc232 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -541,9 +541,6 @@ struct ice_pf { bool adapter_stopped; struct ice_flow_list flow_list; rte_spinlock_t flow_ops_lock; - struct ice_parser_list rss_parser_list; - struct ice_parser_list perm_parser_list; - struct ice_parser_list dist_parser_list; bool init_link_up; uint64_t old_rx_bytes; uint64_t old_tx_bytes; @@ -563,7 +560,6 @@ struct ice_devargs { int rx_low_latency; int safe_mode_support; uint8_t proto_xtr_dflt; - int pipe_mode_support; uint8_t default_mac_disable; uint8_t proto_xtr[ICE_MAX_QUEUE_NUM]; uint8_t pin_idx; @@ -692,7 +688,7 @@ ice_align_floor(int n) { if (n == 0) return 0; - return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)); + return 1 << (sizeof(n) * CHAR_BIT - 1 - rte_clz32(n)); } #define ICE_PHY_TYPE_SUPPORT_50G(phy_type) \ diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c index e8842bc2426..0b7920ad447 100644 --- a/drivers/net/ice/ice_fdir_filter.c +++ b/drivers/net/ice/ice_fdir_filter.c @@ -106,7 +106,7 @@ ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \ ICE_INSET_NAT_T_ESP_SPI) -static struct ice_pattern_match_item ice_fdir_pattern_list[] = { +static struct ice_pattern_match_item ice_fdir_supported_pattern[] = { {pattern_raw, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_ethertype, ICE_FDIR_INSET_ETH, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, @@ -137,8 +137,6 @@ static struct ice_pattern_match_item ice_fdir_pattern_list[] = { {pattern_eth_ipv6_gtpu_eh, ICE_FDIR_INSET_IPV6_GTPU_EH, ICE_FDIR_INSET_IPV6_GTPU_EH, ICE_INSET_NONE}, }; -static struct ice_flow_parser ice_fdir_parser; - static int ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type); @@ -1147,34 +1145,18 @@ static int ice_fdir_init(struct ice_adapter *ad) { struct ice_pf *pf = &ad->pf; - struct ice_flow_parser *parser; - int ret; - - if (ad->hw.dcf_enabled) - return 0; - ret = ice_fdir_setup(pf); - if (ret) - return ret; - - parser = &ice_fdir_parser; - - return ice_register_parser(parser, ad); + return 
ice_fdir_setup(pf); } static void ice_fdir_uninit(struct ice_adapter *ad) { - struct ice_flow_parser *parser; struct ice_pf *pf = &ad->pf; if (ad->hw.dcf_enabled) return; - parser = &ice_fdir_parser; - - ice_unregister_parser(parser, ad); - ice_fdir_teardown(pf); } @@ -2467,7 +2449,7 @@ ice_fdir_parse(struct ice_adapter *ad, item = ice_search_pattern_match_item(ad, pattern, array, array_len, error); - if (!ad->devargs.pipe_mode_support && priority >= 1) + if (priority >= 1) return -rte_errno; if (!item) @@ -2510,10 +2492,10 @@ ice_fdir_parse(struct ice_adapter *ad, return ret; } -static struct ice_flow_parser ice_fdir_parser = { +struct ice_flow_parser ice_fdir_parser = { .engine = &ice_fdir_engine, - .array = ice_fdir_pattern_list, - .array_len = RTE_DIM(ice_fdir_pattern_list), + .array = ice_fdir_supported_pattern, + .array_len = RTE_DIM(ice_fdir_supported_pattern), .parse_pattern_action = ice_fdir_parse, .stage = ICE_FLOW_STAGE_DISTRIBUTOR, }; diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c index 91bf1d6fcb5..50d760004f9 100644 --- a/drivers/net/ice/ice_generic_flow.c +++ b/drivers/net/ice/ice_generic_flow.c @@ -18,16 +18,6 @@ #include "ice_ethdev.h" #include "ice_generic_flow.h" -/** - * Non-pipeline mode, fdir and switch both used as distributor, - * fdir used first, switch used as fdir's backup. - */ -#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY 0 -/*Pipeline mode, switch used at permission stage*/ -#define ICE_FLOW_CLASSIFY_STAGE_PERMISSION 1 -/*Pipeline mode, fdir used at distributor stage*/ -#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2 - #define ICE_FLOW_ENGINE_DISABLED(mask, type) ((mask) & BIT(type)) static struct ice_engine_list engine_list = @@ -1803,15 +1793,13 @@ enum rte_flow_item_type pattern_eth_ipv6_pfcp[] = { RTE_FLOW_ITEM_TYPE_END, }; - - -typedef struct ice_flow_engine * (*parse_engine_t)(struct ice_adapter *ad, - struct rte_flow *flow, - struct ice_parser_list *parser_list, - uint32_t priority, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error); +typedef bool (*parse_engine_t)(struct ice_adapter *ad, + struct rte_flow *flow, + struct ice_flow_parser *parser, + uint32_t priority, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); void ice_register_flow_engine(struct ice_flow_engine *engine) @@ -1828,9 +1816,6 @@ ice_flow_init(struct ice_adapter *ad) struct ice_flow_engine *engine; TAILQ_INIT(&pf->flow_list); - TAILQ_INIT(&pf->rss_parser_list); - TAILQ_INIT(&pf->perm_parser_list); - TAILQ_INIT(&pf->dist_parser_list); rte_spinlock_init(&pf->flow_ops_lock); if (ice_parser_create(&ad->hw, &ad->psr) != ICE_SUCCESS) @@ -1871,7 +1856,6 @@ ice_flow_uninit(struct ice_adapter *ad) struct ice_pf *pf = &ad->pf; struct ice_flow_engine *engine; struct rte_flow *p_flow; - struct ice_flow_parser_node *p_parser; void *temp; RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { @@ -1892,134 +1876,15 @@ ice_flow_uninit(struct ice_adapter *ad) rte_free(p_flow); } - /* Cleanup parser list */ - while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list))) { - TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node); - rte_free(p_parser); - } - - while ((p_parser = TAILQ_FIRST(&pf->perm_parser_list))) { - TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node); - rte_free(p_parser); - } - - while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list))) { - TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node); - rte_free(p_parser); - } - if 
(ad->psr != NULL) { ice_parser_destroy(ad->psr); ad->psr = NULL; } } -static struct ice_parser_list * -ice_get_parser_list(struct ice_flow_parser *parser, - struct ice_adapter *ad) -{ - struct ice_parser_list *list; - struct ice_pf *pf = &ad->pf; - - switch (parser->stage) { - case ICE_FLOW_STAGE_RSS: - list = &pf->rss_parser_list; - break; - case ICE_FLOW_STAGE_PERMISSION: - list = &pf->perm_parser_list; - break; - case ICE_FLOW_STAGE_DISTRIBUTOR: - list = &pf->dist_parser_list; - break; - default: - return NULL; - } - - return list; -} - -int -ice_register_parser(struct ice_flow_parser *parser, - struct ice_adapter *ad) -{ - struct ice_parser_list *list; - struct ice_flow_parser_node *parser_node; - struct ice_flow_parser_node *existing_node; - void *temp; - - parser_node = rte_zmalloc("ice_parser", sizeof(*parser_node), 0); - if (parser_node == NULL) { - PMD_DRV_LOG(ERR, "Failed to allocate memory."); - return -ENOMEM; - } - parser_node->parser = parser; - - list = ice_get_parser_list(parser, ad); - if (list == NULL) - return -EINVAL; - - if (ad->devargs.pipe_mode_support) { - TAILQ_INSERT_TAIL(list, parser_node, node); - } else { - if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH) { - RTE_TAILQ_FOREACH_SAFE(existing_node, list, - node, temp) { - if (existing_node->parser->engine->type == - ICE_FLOW_ENGINE_ACL) { - TAILQ_INSERT_AFTER(list, existing_node, - parser_node, node); - goto DONE; - } - } - TAILQ_INSERT_HEAD(list, parser_node, node); - } else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR) { - RTE_TAILQ_FOREACH_SAFE(existing_node, list, - node, temp) { - if (existing_node->parser->engine->type == - ICE_FLOW_ENGINE_SWITCH) { - TAILQ_INSERT_AFTER(list, existing_node, - parser_node, node); - goto DONE; - } - } - TAILQ_INSERT_HEAD(list, parser_node, node); - } else if (parser->engine->type == ICE_FLOW_ENGINE_HASH) { - TAILQ_INSERT_TAIL(list, parser_node, node); - } else if (parser->engine->type == ICE_FLOW_ENGINE_ACL) { - TAILQ_INSERT_HEAD(list, parser_node, node); - } else { - return -EINVAL; - } - } -DONE: - return 0; -} - -void -ice_unregister_parser(struct ice_flow_parser *parser, - struct ice_adapter *ad) -{ - struct ice_parser_list *list; - struct ice_flow_parser_node *p_parser; - void *temp; - - list = ice_get_parser_list(parser, ad); - if (list == NULL) - return; - - RTE_TAILQ_FOREACH_SAFE(p_parser, list, node, temp) { - if (p_parser->parser->engine->type == parser->engine->type) { - TAILQ_REMOVE(list, p_parser, node); - rte_free(p_parser); - } - } -} - static int -ice_flow_valid_attr(struct ice_adapter *ad, - const struct rte_flow_attr *attr, - int *ice_pipeline_stage, - struct rte_flow_error *error) +ice_flow_valid_attr(const struct rte_flow_attr *attr, + struct rte_flow_error *error) { /* Must be input direction */ if (!attr->ingress) { @@ -2045,30 +1910,10 @@ ice_flow_valid_attr(struct ice_adapter *ad, return -rte_errno; } - /* Check pipeline mode support to set classification stage */ - if (ad->devargs.pipe_mode_support) { - if (attr->priority == 0) - *ice_pipeline_stage = - ICE_FLOW_CLASSIFY_STAGE_PERMISSION; - else - *ice_pipeline_stage = - ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR; - } else { - *ice_pipeline_stage = - ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY; - if (attr->priority > 1) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, - attr, "Only support priority 0 and 1."); - return -rte_errno; - } - } - - /* Not supported */ - if (attr->group) { + if (attr->priority > 1) { rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_GROUP, - 
attr, "Not support group."); + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Only support priority 0 and 1."); return -rte_errno; } @@ -2333,64 +2178,73 @@ ice_search_pattern_match_item(struct ice_adapter *ad, return NULL; } -static struct ice_flow_engine * +static bool ice_parse_engine_create(struct ice_adapter *ad, struct rte_flow *flow, - struct ice_parser_list *parser_list, + struct ice_flow_parser *parser, uint32_t priority, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct ice_flow_engine *engine = NULL; - struct ice_flow_parser_node *parser_node; void *meta = NULL; - void *temp; - RTE_TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) { - int ret; + if (ICE_FLOW_ENGINE_DISABLED(ad->disabled_engine_mask, + parser->engine->type)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "engine is not enabled."); + return false; + } - if (parser_node->parser->parse_pattern_action(ad, - parser_node->parser->array, - parser_node->parser->array_len, - pattern, actions, priority, &meta, error) < 0) - continue; + if (parser->parse_pattern_action(ad, + parser->array, + parser->array_len, + pattern, actions, priority, &meta, error) < 0) + return false; - engine = parser_node->parser->engine; - RTE_ASSERT(engine->create != NULL); - ret = engine->create(ad, flow, meta, error); - if (ret == 0) - return engine; - else if (ret == -EEXIST) - return NULL; - } - return NULL; + RTE_ASSERT(parser->engine->create != NULL); + + return parser->engine->create(ad, flow, meta, error) == 0; } -static struct ice_flow_engine * +static bool ice_parse_engine_validate(struct ice_adapter *ad, struct rte_flow *flow __rte_unused, - struct ice_parser_list *parser_list, + struct ice_flow_parser *parser, uint32_t priority, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct ice_flow_engine *engine = NULL; - struct ice_flow_parser_node *parser_node; - void *temp; - RTE_TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) { - if (parser_node->parser->parse_pattern_action(ad, - parser_node->parser->array, - parser_node->parser->array_len, - pattern, actions, priority, NULL, error) < 0) - continue; + if (ICE_FLOW_ENGINE_DISABLED(ad->disabled_engine_mask, + parser->engine->type)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "engine is not enabled."); + return false; + } - engine = parser_node->parser->engine; - break; + return parser->parse_pattern_action(ad, + parser->array, + parser->array_len, + pattern, actions, priority, + NULL, error) >= 0; +} + +static struct ice_flow_parser *get_flow_parser(uint32_t group) +{ + switch (group) { + case 0: + return &ice_switch_parser; + case 1: + return &ice_acl_parser; + case 2: + return &ice_fdir_parser; + default: + return NULL; } - return engine; } static int @@ -2406,8 +2260,7 @@ ice_flow_process_filter(struct rte_eth_dev *dev, int ret = ICE_ERR_NOT_SUPPORTED; struct ice_adapter *ad = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); - struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); - int ice_pipeline_stage = 0; + struct ice_flow_parser *parser; if (!pattern) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, @@ -2429,33 +2282,34 @@ ice_flow_process_filter(struct rte_eth_dev *dev, return -rte_errno; } - ret = ice_flow_valid_attr(ad, attr, &ice_pipeline_stage, error); + ret = ice_flow_valid_attr(attr, error); if (ret) return ret; - *engine = 
ice_parse_engine(ad, flow, &pf->rss_parser_list, - attr->priority, pattern, actions, error); - if (*engine != NULL) + *engine = NULL; + /* always try hash engine first */ + if (ice_parse_engine(ad, flow, &ice_hash_parser, + attr->priority, pattern, + actions, error)) { + *engine = ice_hash_parser.engine; return 0; - - switch (ice_pipeline_stage) { - case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY: - case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR: - *engine = ice_parse_engine(ad, flow, &pf->dist_parser_list, - attr->priority, pattern, actions, error); - break; - case ICE_FLOW_CLASSIFY_STAGE_PERMISSION: - *engine = ice_parse_engine(ad, flow, &pf->perm_parser_list, - attr->priority, pattern, actions, error); - break; - default: - return -EINVAL; } - if (*engine == NULL) - return -EINVAL; + parser = get_flow_parser(attr->group); + if (parser == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } - return 0; + if (ice_parse_engine(ad, flow, parser, attr->priority, + pattern, actions, error)) { + *engine = parser->engine; + return 0; + } else { + return -rte_errno; + } } static int diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h index 11f51a5c151..391d615b9a8 100644 --- a/drivers/net/ice/ice_generic_flow.h +++ b/drivers/net/ice/ice_generic_flow.h @@ -418,15 +418,13 @@ enum ice_flow_engine_type { }; /** - * classification stages. - * for non-pipeline mode, we have two classification stages: Distributor/RSS - * for pipeline-mode we have three classification stages: + * Classification stages. + * We have two classification stages: Distributor/RSS * Permission/Distributor/RSS */ enum ice_flow_classification_stage { ICE_FLOW_STAGE_NONE = 0, ICE_FLOW_STAGE_RSS, - ICE_FLOW_STAGE_PERMISSION, ICE_FLOW_STAGE_DISTRIBUTOR, ICE_FLOW_STAGE_MAX, }; @@ -517,10 +515,6 @@ struct ice_flow_parser_node { void ice_register_flow_engine(struct ice_flow_engine *engine); int ice_flow_init(struct ice_adapter *ad); void ice_flow_uninit(struct ice_adapter *ad); -int ice_register_parser(struct ice_flow_parser *parser, - struct ice_adapter *ad); -void ice_unregister_parser(struct ice_flow_parser *parser, - struct ice_adapter *ad); struct ice_pattern_match_item * ice_search_pattern_match_item(struct ice_adapter *ad, const struct rte_flow_item pattern[], @@ -530,4 +524,9 @@ ice_search_pattern_match_item(struct ice_adapter *ad, int ice_flow_redirect(struct ice_adapter *ad, struct ice_flow_redirect *rd); + +extern struct ice_flow_parser ice_switch_parser; +extern struct ice_flow_parser ice_acl_parser; +extern struct ice_flow_parser ice_fdir_parser; +extern struct ice_flow_parser ice_hash_parser; #endif diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c index 52646e9408e..f9236415337 100644 --- a/drivers/net/ice/ice_hash.c +++ b/drivers/net/ice/ice_hash.c @@ -572,7 +572,7 @@ static struct ice_flow_engine ice_hash_engine = { }; /* Register parser for os package. 
*/ -static struct ice_flow_parser ice_hash_parser = { +struct ice_flow_parser ice_hash_parser = { .engine = &ice_hash_engine, .array = ice_hash_pattern_list, .array_len = RTE_DIM(ice_hash_pattern_list), @@ -587,16 +587,9 @@ RTE_INIT(ice_hash_engine_init) } static int -ice_hash_init(struct ice_adapter *ad) +ice_hash_init(struct ice_adapter *ad __rte_unused) { - struct ice_flow_parser *parser = NULL; - - if (ad->hw.dcf_enabled) - return 0; - - parser = &ice_hash_parser; - - return ice_register_parser(parser, ad); + return 0; } static int @@ -1033,7 +1026,7 @@ ice_any_invalid_rss_type(enum rte_eth_hash_function rss_func, /* check invalid combination */ for (i = 0; i < RTE_DIM(invalid_rss_comb); i++) { - if (__builtin_popcountll(rss_type & invalid_rss_comb[i]) > 1) + if (rte_popcount64(rss_type & invalid_rss_comb[i]) > 1) return true; } @@ -1442,12 +1435,8 @@ ice_hash_destroy(struct ice_adapter *ad, } static void -ice_hash_uninit(struct ice_adapter *ad) +ice_hash_uninit(struct ice_adapter *ad __rte_unused) { - if (ad->hw.dcf_enabled) - return; - - ice_unregister_parser(&ice_hash_parser, ad); } static void diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index 64c4486b4b3..ee9cb7b955b 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -2150,7 +2150,7 @@ ice_recv_scattered_pkts(void *rx_queue, } rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); - *RTE_MBUF_DYNFIELD(rxm, + *RTE_MBUF_DYNFIELD(first_seg, (ice_timestamp_dynfield_offset), rte_mbuf_timestamp_t *) = ts_ns; pkt_flags |= ice_timestamp_dynflag; @@ -3685,9 +3685,6 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, int i, ret; uint64_t ol_flags; struct rte_mbuf *m; - struct ice_tx_queue *txq = tx_queue; - struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id]; - uint16_t max_frame_size = dev->data->mtu + ICE_ETH_OVERHEAD; for (i = 0; i < nb_pkts; i++) { m = tx_pkts[i]; @@ -3704,11 +3701,8 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, return i; } - /* check the data_len in mbuf */ - if (m->data_len < ICE_TX_MIN_PKT_LEN || - m->data_len > max_frame_size) { + if (m->pkt_len < ICE_TX_MIN_PKT_LEN) { rte_errno = EINVAL; - PMD_DRV_LOG(ERR, "INVALID mbuf: bad data_len=[%hu]", m->data_len); return i; } @@ -3727,7 +3721,6 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, if (ice_check_empty_mbuf(m) != 0) { rte_errno = EINVAL; - PMD_DRV_LOG(ERR, "INVALID mbuf: last mbuf data_len=[0]"); return i; } } diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c index fd13ff18f12..6f6d7909673 100644 --- a/drivers/net/ice/ice_rxtx_vec_avx2.c +++ b/drivers/net/ice/ice_rxtx_vec_avx2.c @@ -678,11 +678,11 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, status0_7 = _mm256_packs_epi32(status0_7, _mm256_setzero_si256()); - uint64_t burst = __builtin_popcountll + uint64_t burst = rte_popcount64 (_mm_cvtsi128_si64 (_mm256_extracti128_si256 (status0_7, 1))); - burst += __builtin_popcountll + burst += rte_popcount64 (_mm_cvtsi128_si64 (_mm256_castsi256_si128(status0_7))); received += burst; diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c index c3b087c52ec..04148e8ea2d 100644 --- a/drivers/net/ice/ice_rxtx_vec_avx512.c +++ b/drivers/net/ice/ice_rxtx_vec_avx512.c @@ -680,11 +680,11 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, status0_7 = _mm256_packs_epi32(status0_7, _mm256_setzero_si256()); - uint64_t burst 
= __builtin_popcountll + uint64_t burst = rte_popcount64 (_mm_cvtsi128_si64 (_mm256_extracti128_si256 (status0_7, 1))); - burst += __builtin_popcountll + burst += rte_popcount64 (_mm_cvtsi128_si64 (_mm256_castsi256_si128(status0_7))); received += burst; diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c index 71fdd6ffb5d..9a1b7e3e51d 100644 --- a/drivers/net/ice/ice_rxtx_vec_sse.c +++ b/drivers/net/ice/ice_rxtx_vec_sse.c @@ -575,7 +575,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, pkt_mb0); ice_rx_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl); /* C.4 calc available number of desc */ - var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + var = rte_popcount64(_mm_cvtsi128_si64(staterr)); nb_pkts_recd += var; if (likely(var != ICE_DESCS_PER_LOOP)) break; diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c index 110d8895fea..122b87f625a 100644 --- a/drivers/net/ice/ice_switch_filter.c +++ b/drivers/net/ice/ice_switch_filter.c @@ -201,11 +201,8 @@ struct ice_switch_filter_conf { struct ice_adv_rule_info rule_info; }; -static struct ice_flow_parser ice_switch_dist_parser; -static struct ice_flow_parser ice_switch_perm_parser; - static struct -ice_pattern_match_item ice_switch_pattern_dist_list[] = { +ice_pattern_match_item ice_switch_supported_pattern[] = { {pattern_any, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE}, @@ -288,90 +285,6 @@ ice_pattern_match_item ice_switch_pattern_dist_list[] = { {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, }; -static struct -ice_pattern_match_item ice_switch_pattern_perm_list[] = { - {pattern_any, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE}, - 
{pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_qinq_ipv4_tcp, ICE_SW_INSET_MAC_QINQ_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_qinq_ipv4_udp, ICE_SW_INSET_MAC_QINQ_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_qinq_ipv6_tcp, ICE_SW_INSET_MAC_QINQ_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_qinq_ipv6_udp, ICE_SW_INSET_MAC_QINQ_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, 
ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE}, - {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE}, - {pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE}, - {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE}, - {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, -}; - static int ice_switch_create(struct ice_adapter *ad, struct rte_flow *flow, @@ -2137,35 +2050,14 @@ ice_switch_redirect(struct ice_adapter *ad, } static int -ice_switch_init(struct ice_adapter *ad) +ice_switch_init(struct ice_adapter *ad __rte_unused) { - int ret = 0; - struct ice_flow_parser *dist_parser; - struct ice_flow_parser *perm_parser; - - if (ad->devargs.pipe_mode_support) { - perm_parser = &ice_switch_perm_parser; - ret = ice_register_parser(perm_parser, ad); - } else { - dist_parser = &ice_switch_dist_parser; - ret = ice_register_parser(dist_parser, ad); - } - return ret; + return 0; } static void -ice_switch_uninit(struct ice_adapter *ad) +ice_switch_uninit(struct ice_adapter *ad __rte_unused) { - struct ice_flow_parser *dist_parser; - struct ice_flow_parser *perm_parser; - - if (ad->devargs.pipe_mode_support) { - perm_parser = &ice_switch_perm_parser; - ice_unregister_parser(perm_parser, ad); - } else { - dist_parser = &ice_switch_dist_parser; - ice_unregister_parser(dist_parser, ad); - } } static struct @@ -2180,24 +2072,15 @@ ice_flow_engine ice_switch_engine 
= { .type = ICE_FLOW_ENGINE_SWITCH, }; -static struct -ice_flow_parser ice_switch_dist_parser = { +struct +ice_flow_parser ice_switch_parser = { .engine = &ice_switch_engine, - .array = ice_switch_pattern_dist_list, - .array_len = RTE_DIM(ice_switch_pattern_dist_list), + .array = ice_switch_supported_pattern, + .array_len = RTE_DIM(ice_switch_supported_pattern), .parse_pattern_action = ice_switch_parse_pattern_action, .stage = ICE_FLOW_STAGE_DISTRIBUTOR, }; -static struct -ice_flow_parser ice_switch_perm_parser = { - .engine = &ice_switch_engine, - .array = ice_switch_pattern_perm_list, - .array_len = RTE_DIM(ice_switch_pattern_perm_list), - .parse_pattern_action = ice_switch_parse_pattern_action, - .stage = ICE_FLOW_STAGE_PERMISSION, -}; - RTE_INIT(ice_sw_engine_init) { struct ice_flow_engine *engine = &ice_switch_engine; diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build index 460528854a8..a957fc5d3a1 100644 --- a/drivers/net/ice/meson.build +++ b/drivers/net/ice/meson.build @@ -28,21 +28,12 @@ if arch_subdir == 'x86' cflags += ['-fno-asynchronous-unwind-tables'] endif - # compile AVX2 version if either: - # a. we have AVX supported in minimum instruction set baseline - # b. it's not minimum instruction set, but supported by compiler - if cc.get_define('__AVX2__', args: machine_args) != '' - cflags += ['-DCC_AVX2_SUPPORT'] - sources += files('ice_rxtx_vec_avx2.c') - elif cc.has_argument('-mavx2') - cflags += ['-DCC_AVX2_SUPPORT'] - ice_avx2_lib = static_library('ice_avx2_lib', - 'ice_rxtx_vec_avx2.c', - dependencies: [static_rte_ethdev, static_rte_kvargs, static_rte_hash], - include_directories: includes, - c_args: [cflags, '-mavx2']) - objs += ice_avx2_lib.extract_objects('ice_rxtx_vec_avx2.c') - endif + ice_avx2_lib = static_library('ice_avx2_lib', + 'ice_rxtx_vec_avx2.c', + dependencies: [static_rte_ethdev, static_rte_kvargs, static_rte_hash], + include_directories: includes, + c_args: [cflags, '-mavx2']) + objs += ice_avx2_lib.extract_objects('ice_rxtx_vec_avx2.c') ice_avx512_cpu_support = ( cc.get_define('__AVX512F__', args: machine_args) != '' and diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c index 3e3d81ca6df..64f22355804 100644 --- a/drivers/net/idpf/idpf_rxtx.c +++ b/drivers/net/idpf/idpf_rxtx.c @@ -74,7 +74,7 @@ idpf_dma_zone_reserve(struct rte_eth_dev *dev, uint16_t queue_idx, ring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_sched_desc), IDPF_DMA_MEM_ALIGN); else - ring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_desc), + ring_size = RTE_ALIGN(len * sizeof(struct idpf_base_tx_desc), IDPF_DMA_MEM_ALIGN); rte_memcpy(ring_name, "idpf Tx ring", sizeof("idpf Tx ring")); break; diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c index e50fc73f43c..8145f1bb2a0 100644 --- a/drivers/net/ipn3ke/ipn3ke_representor.c +++ b/drivers/net/ipn3ke/ipn3ke_representor.c @@ -27,7 +27,7 @@ #include "ipn3ke_ethdev.h" static int ipn3ke_rpst_scan_num; -static pthread_t ipn3ke_rpst_scan_thread; +static rte_thread_t ipn3ke_rpst_scan_thread; /** Double linked list of representor port. 
*/ TAILQ_HEAD(ipn3ke_rpst_list, ipn3ke_rpst); @@ -120,6 +120,7 @@ ipn3ke_rpst_dev_start(struct rte_eth_dev *dev) uint64_t base_mac; uint32_t val; char attr_name[IPN3KE_RAWDEV_ATTR_LEN_MAX]; + uint16_t i; rawdev = hw->rawdev; @@ -190,6 +191,11 @@ ipn3ke_rpst_dev_start(struct rte_eth_dev *dev) ipn3ke_rpst_link_update(dev, 0); + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; } @@ -198,6 +204,7 @@ ipn3ke_rpst_dev_stop(struct rte_eth_dev *dev) { struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev); + uint16_t i; if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { /* Disable the TX path */ @@ -207,6 +214,11 @@ ipn3ke_rpst_dev_stop(struct rte_eth_dev *dev) ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0); } + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } @@ -2558,7 +2570,7 @@ ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst) return 0; } -static void * +static uint32_t ipn3ke_rpst_scan_handle_request(__rte_unused void *param) { struct ipn3ke_rpst *rpst; @@ -2580,10 +2592,10 @@ ipn3ke_rpst_scan_handle_request(__rte_unused void *param) rte_delay_us(50 * MS); if (num == 0 || num == 0xffffff) - return NULL; + return 0; } - return NULL; + return 0; } static int @@ -2592,20 +2604,19 @@ ipn3ke_rpst_scan_check(void) int ret; if (ipn3ke_rpst_scan_num == 1) { - ret = rte_ctrl_thread_create(&ipn3ke_rpst_scan_thread, - "ipn3ke scanner", - NULL, + ret = rte_thread_create_internal_control(&ipn3ke_rpst_scan_thread, + "ipn3ke-scn", ipn3ke_rpst_scan_handle_request, NULL); if (ret) { IPN3KE_AFU_PMD_ERR("Fail to create ipn3ke rpst scan thread"); return -1; } } else if (ipn3ke_rpst_scan_num == 0) { - ret = pthread_cancel(ipn3ke_rpst_scan_thread); + ret = pthread_cancel((pthread_t)ipn3ke_rpst_scan_thread.opaque_id); if (ret) IPN3KE_AFU_PMD_ERR("Can't cancel the thread"); - ret = pthread_join(ipn3ke_rpst_scan_thread, NULL); + ret = rte_thread_join(ipn3ke_rpst_scan_thread, NULL); if (ret) IPN3KE_AFU_PMD_ERR("Can't join the thread"); diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index 14a7d571e05..d6cf00317e7 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -543,6 +543,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, .rxq_info_get = ixgbe_rxq_info_get, .txq_info_get = ixgbe_txq_info_get, + .recycle_rxq_info_get = ixgbe_recycle_rxq_info_get, .timesync_enable = ixgbe_timesync_enable, .timesync_disable = ixgbe_timesync_disable, .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp, @@ -4328,11 +4329,9 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, * when there is no link thread running. 
*/ intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; - if (rte_thread_create_control(&ad->link_thread_tid, - "ixgbe-link-handler", - NULL, - ixgbe_dev_setup_link_thread_handler, - dev) < 0) { + if (rte_thread_create_internal_control(&ad->link_thread_tid, + "ixgbe-link", + ixgbe_dev_setup_link_thread_handler, dev) < 0) { PMD_DRV_LOG(ERR, "Create link thread failed!"); /* NOTE: review for potential ordering optimization */ diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h index 1291e9099c2..22fc3be3d8c 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.h +++ b/drivers/net/ixgbe/ixgbe_ethdev.h @@ -626,6 +626,9 @@ void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_txq_info *qinfo); +void ixgbe_recycle_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_recycle_rxq_info *recycle_rxq_info); + int ixgbevf_dev_rx_init(struct rte_eth_dev *dev); void ixgbevf_dev_tx_init(struct rte_eth_dev *dev); diff --git a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c new file mode 100644 index 00000000000..d451562269e --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2023 Arm Limited. + */ + +#include +#include + +#include "ixgbe_ethdev.h" +#include "ixgbe_rxtx.h" + +#pragma GCC diagnostic ignored "-Wcast-qual" + +void +ixgbe_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs) +{ + struct ixgbe_rx_queue *rxq = rx_queue; + struct ixgbe_rx_entry *rxep; + volatile union ixgbe_adv_rx_desc *rxdp; + uint16_t rx_id; + uint64_t paddr; + uint64_t dma_addr; + uint16_t i; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + rxep = &rxq->sw_ring[rxq->rxrearm_start]; + + for (i = 0; i < nb_mbufs; i++) { + /* Initialize rxdp descs. */ + paddr = (rxep[i].mbuf)->buf_iova + RTE_PKTMBUF_HEADROOM; + dma_addr = rte_cpu_to_le_64(paddr); + /* Flush descriptors with pa dma_addr */ + rxdp[i].read.hdr_addr = 0; + rxdp[i].read.pkt_addr = dma_addr; + } + + /* Update the descriptor initializer index */ + rxq->rxrearm_start += nb_mbufs; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= nb_mbufs; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); +} + +uint16_t +ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue, + struct rte_eth_recycle_rxq_info *recycle_rxq_info) +{ + struct ixgbe_tx_queue *txq = tx_queue; + struct ixgbe_tx_entry *txep; + struct rte_mbuf **rxep; + int i, n; + uint32_t status; + uint16_t nb_recycle_mbufs; + uint16_t avail = 0; + uint16_t mbuf_ring_size = recycle_rxq_info->mbuf_ring_size; + uint16_t mask = recycle_rxq_info->mbuf_ring_size - 1; + uint16_t refill_requirement = recycle_rxq_info->refill_requirement; + uint16_t refill_head = *recycle_rxq_info->refill_head; + uint16_t receive_tail = *recycle_rxq_info->receive_tail; + + /* Get available recycling Rx buffers. */ + avail = (mbuf_ring_size - (refill_head - receive_tail)) & mask; + + /* Check Tx free thresh and Rx available space. 
*/ + if (txq->nb_tx_free > txq->tx_free_thresh || avail <= txq->tx_rs_thresh) + return 0; + + /* check DD bits on threshold descriptor */ + status = txq->tx_ring[txq->tx_next_dd].wb.status; + if (!(status & IXGBE_ADVTXD_STAT_DD)) + return 0; + + n = txq->tx_rs_thresh; + nb_recycle_mbufs = n; + + /* Mbufs recycle can only support no ring buffer wrapping around. + * Two case for this: + * + * case 1: The refill head of Rx buffer ring needs to be aligned with + * buffer ring size. In this case, the number of Tx freeing buffers + * should be equal to refill_requirement. + * + * case 2: The refill head of Rx ring buffer does not need to be aligned + * with buffer ring size. In this case, the update of refill head can not + * exceed the Rx buffer ring size. + */ + if ((refill_requirement && refill_requirement != n) || + (!refill_requirement && (refill_head + n > mbuf_ring_size))) + return 0; + + /* First buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh-1). + */ + txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)]; + rxep = recycle_rxq_info->mbuf_ring; + rxep += refill_head; + + if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) { + /* Avoid txq contains buffers from unexpected mempool. */ + if (unlikely(recycle_rxq_info->mp + != txep[0].mbuf->pool)) + return 0; + + /* Directly put mbufs from Tx to Rx. */ + for (i = 0; i < n; i++) + rxep[i] = txep[i].mbuf; + } else { + for (i = 0; i < n; i++) { + rxep[i] = rte_pktmbuf_prefree_seg(txep[i].mbuf); + + /* If Tx buffers are not the last reference or from + * unexpected mempool, previous copied buffers are + * considered as invalid. + */ + if (unlikely(rxep[i] == NULL || + recycle_rxq_info->mp != txep[i].mbuf->pool)) + nb_recycle_mbufs = 0; + } + /* If Tx buffers are not the last reference or + * from unexpected mempool, all recycled buffers + * are put into mempool. + */ + if (nb_recycle_mbufs == 0) + for (i = 0; i < n; i++) { + if (rxep[i] != NULL) + rte_mempool_put(rxep[i]->pool, rxep[i]); + } + } + + /* Update counters for Tx. 
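For readers tracing the recycle logic above: the free-slot computation avail = (mbuf_ring_size - (refill_head - receive_tail)) & mask depends on the mbuf ring size being a power of two and on natural uint16_t wrap-around. The following standalone sketch, not part of the patch and using illustrative names only, shows what that expression evaluates to for a few head/tail combinations, including the full-distance case that aliases to zero under the mask.

    /* Standalone sketch: power-of-two ring arithmetic as used by the
     * recycle path above.  Names and values are illustrative only.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t
    ring_free_slots(uint16_t ring_size, uint16_t head, uint16_t tail)
    {
            uint16_t mask = ring_size - 1;  /* ring_size must be a power of two */

            /* uint16_t subtraction wraps, so head - tail is the in-use count
             * even after the indices run past 65535.
             */
            return (ring_size - (head - tail)) & mask;
    }

    int
    main(void)
    {
            printf("%u\n", ring_free_slots(512, 10, 2));     /* 8 in use -> 504 */
            printf("%u\n", ring_free_slots(512, 3, 65533));  /* wrapped, 6 in use -> 506 */
            printf("%u\n", ring_free_slots(512, 100, 100));  /* full distance aliases to 0 */
            return 0;
    }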
*/ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return nb_recycle_mbufs; +} diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index 954ef241a08..90b0a7004f5 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -2552,6 +2552,9 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) (rte_eal_process_type() != RTE_PROC_PRIMARY || ixgbe_txq_vec_setup(txq) == 0)) { PMD_INIT_LOG(DEBUG, "Vector tx enabled."); +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM) + dev->recycle_tx_mbufs_reuse = ixgbe_recycle_tx_mbufs_reuse_vec; +#endif dev->tx_pkt_burst = ixgbe_xmit_pkts_vec; } else dev->tx_pkt_burst = ixgbe_xmit_pkts_simple; @@ -4890,7 +4893,10 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx " "callback (port=%d).", dev->data->port_id); - +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM) + dev->recycle_rx_descriptors_refill = + ixgbe_recycle_rx_descriptors_refill_vec; +#endif dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec; } else if (adapter->rx_bulk_alloc_allowed) { PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk " @@ -4919,7 +4925,9 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) "burst size no less than %d (port=%d).", RTE_IXGBE_DESCS_PER_LOOP, dev->data->port_id); - +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM) + dev->recycle_rx_descriptors_refill = ixgbe_recycle_rx_descriptors_refill_vec; +#endif dev->rx_pkt_burst = ixgbe_recv_pkts_vec; } else if (adapter->rx_bulk_alloc_allowed) { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " @@ -5691,6 +5699,31 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_deferred_start = txq->tx_deferred_start; } +void +ixgbe_recycle_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_recycle_rxq_info *recycle_rxq_info) +{ + struct ixgbe_rx_queue *rxq; + struct ixgbe_adapter *adapter = dev->data->dev_private; + + rxq = dev->data->rx_queues[queue_id]; + + recycle_rxq_info->mbuf_ring = (void *)rxq->sw_ring; + recycle_rxq_info->mp = rxq->mb_pool; + recycle_rxq_info->mbuf_ring_size = rxq->nb_rx_desc; + recycle_rxq_info->receive_tail = &rxq->rx_tail; + + if (adapter->rx_vec_allowed) { +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM) + recycle_rxq_info->refill_requirement = RTE_IXGBE_RXQ_REARM_THRESH; + recycle_rxq_info->refill_head = &rxq->rxrearm_start; +#endif + } else { + recycle_rxq_info->refill_requirement = rxq->rx_free_thresh; + recycle_rxq_info->refill_head = &rxq->rx_free_trigger; + } +} + /* * [VF] Initializes Receive Unit. 
*/ diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h index 668a5b9814f..ee89c89929b 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.h +++ b/drivers/net/ixgbe/ixgbe_rxtx.h @@ -295,6 +295,10 @@ int ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt); extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX]; extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX]; +uint16_t ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue, + struct rte_eth_recycle_rxq_info *recycle_rxq_info); +void ixgbe_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs); + uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq); diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c index 90b254ea265..952b032eb6d 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c @@ -452,7 +452,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, if (unlikely(stat == 0)) { nb_pkts_recd += RTE_IXGBE_DESCS_PER_LOOP; } else { - nb_pkts_recd += __builtin_ctz(stat) / IXGBE_UINT8_BIT; + nb_pkts_recd += rte_ctz32(stat) / IXGBE_UINT8_BIT; break; } } diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c index bb34b27168d..f60808d576a 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c @@ -574,7 +574,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]); /* C.4 calc available number of desc */ - var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + var = rte_popcount64(_mm_cvtsi128_si64(staterr)); nb_pkts_recd += var; if (likely(var != RTE_IXGBE_DESCS_PER_LOOP)) break; diff --git a/drivers/net/ixgbe/meson.build b/drivers/net/ixgbe/meson.build index a18908ef7ce..0ae12dd5ff2 100644 --- a/drivers/net/ixgbe/meson.build +++ b/drivers/net/ixgbe/meson.build @@ -26,11 +26,13 @@ deps += ['hash', 'security'] if arch_subdir == 'x86' sources += files('ixgbe_rxtx_vec_sse.c') + sources += files('ixgbe_recycle_mbufs_vec_common.c') if is_windows and cc.get_id() != 'clang' cflags += ['-fno-asynchronous-unwind-tables'] endif elif arch_subdir == 'arm' sources += files('ixgbe_rxtx_vec_neon.c') + sources += files('ixgbe_recycle_mbufs_vec_common.c') endif includes += include_directories('base') diff --git a/drivers/net/mana/gdma.c b/drivers/net/mana/gdma.c index 65685fe2361..7f66a7a7cf7 100644 --- a/drivers/net/mana/gdma.c +++ b/drivers/net/mana/gdma.c @@ -166,6 +166,97 @@ gdma_post_work_request(struct mana_gdma_queue *queue, return 0; } +#ifdef RTE_ARCH_32 +union gdma_short_doorbell_entry { + uint32_t as_uint32; + + struct { + uint32_t tail_ptr_incr : 16; /* Number of CQEs */ + uint32_t id : 12; + uint32_t reserved : 3; + uint32_t arm : 1; + } cq; + + struct { + uint32_t tail_ptr_incr : 16; /* In number of bytes */ + uint32_t id : 12; + uint32_t reserved : 4; + } rq; + + struct { + uint32_t tail_ptr_incr : 16; /* In number of bytes */ + uint32_t id : 12; + uint32_t reserved : 4; + } sq; + + struct { + uint32_t tail_ptr_incr : 16; /* Number of EQEs */ + uint32_t id : 12; + uint32_t reserved : 3; + uint32_t arm : 1; + } eq; +}; /* HW DATA */ + +enum { + DOORBELL_SHORT_OFFSET_SQ = 0x10, + DOORBELL_SHORT_OFFSET_RQ = 0x410, + DOORBELL_SHORT_OFFSET_CQ = 0x810, + DOORBELL_SHORT_OFFSET_EQ = 0xFF0, +}; + +/* + * Write to hardware doorbell to notify new 
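The NEON and SSE Rx loops above switch from raw compiler builtins to rte_ctz32() and rte_popcount64(). As a standalone reminder of the underlying trick, independent of DPDK and with made-up status values (on GCC/Clang the DPDK helpers map to the builtins used here):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* If each descriptor owns one byte of 'stat' and a non-zero byte
             * means "not done yet", the number of leading completed
             * descriptors is the count of all-zero low bytes, i.e. trailing
             * zero bits divided by 8 (what rte_ctz32(stat) / 8 computes).
             */
            uint32_t stat = 0x00010000;                  /* descriptors 0 and 1 done */
            printf("%d\n", __builtin_ctz(stat) / 8);     /* prints 2 */

            /* If each completed descriptor instead sets a single bit of a
             * mask, a population count (rte_popcount64) gives the total.
             */
            uint64_t done = 0x0000000100010001ULL;       /* three bits set */
            printf("%d\n", __builtin_popcountll(done));  /* prints 3 */
            return 0;
    }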
activity. + */ +int +mana_ring_short_doorbell(void *db_page, enum gdma_queue_types queue_type, + uint32_t queue_id, uint32_t tail_incr, uint8_t arm) +{ + uint8_t *addr = db_page; + union gdma_short_doorbell_entry e = {}; + + if ((queue_id & ~GDMA_SHORT_DB_QID_MASK) || + (tail_incr & ~GDMA_SHORT_DB_INC_MASK)) { + DP_LOG(ERR, "%s: queue_id %u or " + "tail_incr %u overflowed, queue type %d", + __func__, queue_id, tail_incr, queue_type); + return -EINVAL; + } + + switch (queue_type) { + case GDMA_QUEUE_SEND: + e.sq.id = queue_id; + e.sq.tail_ptr_incr = tail_incr; + addr += DOORBELL_SHORT_OFFSET_SQ; + break; + + case GDMA_QUEUE_RECEIVE: + e.rq.id = queue_id; + e.rq.tail_ptr_incr = tail_incr; + addr += DOORBELL_SHORT_OFFSET_RQ; + break; + + case GDMA_QUEUE_COMPLETION: + e.cq.id = queue_id; + e.cq.tail_ptr_incr = tail_incr; + e.cq.arm = arm; + addr += DOORBELL_SHORT_OFFSET_CQ; + break; + + default: + DP_LOG(ERR, "Unsupported queue type %d", queue_type); + return -1; + } + + /* Ensure all writes are done before ringing doorbell */ + rte_wmb(); + + DP_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u arm %u", + db_page, addr, queue_id, queue_type, tail_incr, arm); + + rte_write32(e.as_uint32, addr); + return 0; +} +#else union gdma_doorbell_entry { uint64_t as_uint64; @@ -248,6 +339,7 @@ mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type, rte_write64(e.as_uint64, addr); return 0; } +#endif /* * Poll completion queue for completions. diff --git a/drivers/net/mana/mana.c b/drivers/net/mana/mana.c index 7630118d4f2..41c3cf25958 100644 --- a/drivers/net/mana/mana.c +++ b/drivers/net/mana/mana.c @@ -6,6 +6,8 @@ #include #include #include +#include +#include #include #include @@ -286,11 +288,12 @@ mana_dev_info_get(struct rte_eth_dev *dev, { struct mana_priv *priv = dev->data->dev_private; - dev_info->max_mtu = RTE_ETHER_MTU; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; + dev_info->max_mtu = MANA_MAX_MTU; /* RX params */ dev_info->min_rx_bufsize = MIN_RX_BUF_SIZE; - dev_info->max_rx_pktlen = MAX_FRAME_SIZE; + dev_info->max_rx_pktlen = MANA_MAX_MTU + RTE_ETHER_HDR_LEN; dev_info->max_rx_queues = priv->max_rx_queues; dev_info->max_tx_queues = priv->max_tx_queues; @@ -700,6 +703,94 @@ mana_dev_stats_reset(struct rte_eth_dev *dev __rte_unused) return 0; } +static int +mana_get_ifname(const struct mana_priv *priv, char (*ifname)[IF_NAMESIZE]) +{ + int ret; + DIR *dir; + struct dirent *dent; + + MANA_MKSTR(dirpath, "%s/device/net", priv->ib_ctx->device->ibdev_path); + + dir = opendir(dirpath); + if (dir == NULL) + return -ENODEV; + + while ((dent = readdir(dir)) != NULL) { + char *name = dent->d_name; + FILE *file; + struct rte_ether_addr addr; + char *mac = NULL; + + if ((name[0] == '.') && + ((name[1] == '\0') || + ((name[1] == '.') && (name[2] == '\0')))) + continue; + + MANA_MKSTR(path, "%s/%s/address", dirpath, name); + + file = fopen(path, "r"); + if (!file) { + ret = -ENODEV; + break; + } + + ret = fscanf(file, "%ms", &mac); + fclose(file); + + if (ret <= 0) { + ret = -EINVAL; + break; + } + + ret = rte_ether_unformat_addr(mac, &addr); + free(mac); + if (ret) + break; + + if (rte_is_same_ether_addr(&addr, priv->dev_data->mac_addrs)) { + strlcpy(*ifname, name, sizeof(*ifname)); + ret = 0; + break; + } + } + + closedir(dir); + return ret; +} + +static int +mana_ifreq(const struct mana_priv *priv, int req, struct ifreq *ifr) +{ + int sock, ret; + + sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); + if (sock == -1) + return -errno; + + ret = mana_get_ifname(priv, &ifr->ifr_name); + if 
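To make the 32-bit short doorbell layout above easier to picture, here is a standalone sketch, not part of the patch, that packs a completion-queue doorbell with the same field widths as the union defined earlier; the queue id and tail increment are placeholder values.

    #include <stdint.h>
    #include <stdio.h>

    union short_db_cq {
            uint32_t as_uint32;
            struct {
                    uint32_t tail_ptr_incr : 16;  /* number of CQEs consumed */
                    uint32_t id            : 12;  /* completion queue id */
                    uint32_t reserved      : 3;
                    uint32_t arm           : 1;   /* request an interrupt */
            } cq;
    };

    int
    main(void)
    {
            union short_db_cq e = { .as_uint32 = 0 };

            e.cq.id = 0x123;          /* must fit the 12-bit field */
            e.cq.tail_ptr_incr = 64;  /* must fit the 16-bit field */
            e.cq.arm = 1;

            /* On the usual little-endian, LSB-first bitfield ABIs this prints
             * 0x81230040: increment in bits 0-15, id in bits 16-27, arm in
             * bit 31 -- one 32-bit register write instead of the 64-bit
             * doorbell used on 64-bit builds.
             */
            printf("0x%08x\n", e.as_uint32);
            return 0;
    }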
(ret) { + close(sock); + return ret; + } + + if (ioctl(sock, req, ifr) == -1) + ret = -errno; + + close(sock); + + return ret; +} + +static int +mana_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct mana_priv *priv = dev->data->dev_private; + struct ifreq request = { .ifr_mtu = mtu, }; + + return mana_ifreq(priv, SIOCSIFMTU, &request); +} + static const struct eth_dev_ops mana_dev_ops = { .dev_configure = mana_dev_configure, .dev_start = mana_dev_start, @@ -720,6 +811,7 @@ static const struct eth_dev_ops mana_dev_ops = { .link_update = mana_dev_link_update, .stats_get = mana_dev_stats_get, .stats_reset = mana_dev_stats_reset, + .mtu_set = mana_mtu_set, }; static const struct eth_dev_ops mana_dev_secondary_ops = { @@ -822,7 +914,6 @@ get_port_mac(struct ibv_device *device, unsigned int port, DIR *dir; struct dirent *dent; unsigned int dev_port; - char mac[20]; MANA_MKSTR(path, "%s/device/net", device->ibdev_path); @@ -832,6 +923,7 @@ get_port_mac(struct ibv_device *device, unsigned int port, while ((dent = readdir(dir))) { char *name = dent->d_name; + char *mac = NULL; MANA_MKSTR(port_path, "%s/%s/dev_port", path, name); @@ -859,7 +951,7 @@ get_port_mac(struct ibv_device *device, unsigned int port, if (!file) continue; - ret = fscanf(file, "%s", mac); + ret = fscanf(file, "%ms", &mac); fclose(file); if (ret < 0) @@ -868,6 +960,8 @@ get_port_mac(struct ibv_device *device, unsigned int port, ret = rte_ether_unformat_addr(mac, addr); if (ret) DRV_LOG(ERR, "unrecognized mac addr %s", mac); + + free(mac); break; } } @@ -1260,7 +1354,7 @@ mana_probe_port(struct ibv_device *ibdev, struct ibv_device_attr_ex *dev_attr, /* Create a parent domain with the port number */ attr.pd = priv->ib_pd; attr.comp_mask = IBV_PARENT_DOMAIN_INIT_ATTR_PD_CONTEXT; - attr.pd_context = (void *)(uint64_t)port; + attr.pd_context = (void *)(uintptr_t)port; priv->ib_parent_pd = ibv_alloc_parent_domain(ctx, &attr); if (!priv->ib_parent_pd) { DRV_LOG(ERR, "ibv_alloc_parent_domain failed port %d", port); diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h index 5801491d75e..6836872dc25 100644 --- a/drivers/net/mana/mana.h +++ b/drivers/net/mana/mana.h @@ -16,8 +16,8 @@ struct mana_shared_data { unsigned int secondary_cnt; }; +#define MANA_MAX_MTU 9000 #define MIN_RX_BUF_SIZE 1024 -#define MAX_FRAME_SIZE RTE_ETHER_MAX_LEN #define MANA_MAX_MAC_ADDR 1 #define MANA_DEV_RX_OFFLOAD_SUPPORT ( \ @@ -50,6 +50,21 @@ struct mana_shared_data { #define MAX_TX_WQE_SIZE 512 #define MAX_RX_WQE_SIZE 256 +/* For 32 bit only */ +#ifdef RTE_ARCH_32 +#define GDMA_SHORT_DB_INC_MASK 0xffff +#define GDMA_SHORT_DB_QID_MASK 0xfff + +#define GDMA_SHORT_DB_MAX_WQE (0x10000 / GDMA_WQE_ALIGNMENT_UNIT_SIZE) + +#define TX_WQE_SHORT_DB_THRESHOLD \ + (GDMA_SHORT_DB_MAX_WQE - \ + (MAX_TX_WQE_SIZE / GDMA_WQE_ALIGNMENT_UNIT_SIZE)) +#define RX_WQE_SHORT_DB_THRESHOLD \ + (GDMA_SHORT_DB_MAX_WQE - \ + (MAX_RX_WQE_SIZE / GDMA_WQE_ALIGNMENT_UNIT_SIZE)) +#endif + /* Values from the GDMA specification document, WQE format description */ #define INLINE_OOB_SMALL_SIZE_IN_BYTES 8 #define INLINE_OOB_LARGE_SIZE_IN_BYTES 24 @@ -425,6 +440,11 @@ struct mana_rxq { */ uint32_t desc_ring_head, desc_ring_tail; +#ifdef RTE_ARCH_32 + /* For storing wqe increment count btw each short doorbell ring */ + uint32_t wqe_cnt_to_short_db; +#endif + struct mana_gdma_queue gdma_rq; struct mana_gdma_queue gdma_cq; struct gdma_comp *gdma_comp_buf; @@ -447,7 +467,7 @@ extern int mana_logtype_init; __func__, ## args) #define DP_LOG(level, fmt, args...) 
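mana_mtu_set() above resolves the kernel netdev whose MAC matches the port and then issues a SIOCSIFMTU ioctl on a throwaway datagram socket. A minimal standalone version of that last step, with a placeholder interface name and MTU, could look like this:

    #include <errno.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Set the MTU of a named interface; any dummy datagram socket can
     * carry interface ioctls.
     */
    static int
    set_mtu(const char *ifname, int mtu)
    {
            struct ifreq ifr;
            int sock, ret = 0;

            sock = socket(AF_INET, SOCK_DGRAM, 0);
            if (sock < 0)
                    return -errno;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_mtu = mtu;

            if (ioctl(sock, SIOCSIFMTU, &ifr) < 0)
                    ret = -errno;

            close(sock);
            return ret;
    }

    int
    main(void)
    {
            int ret = set_mtu("eth0", 9000);  /* placeholder name and MTU */

            if (ret)
                    fprintf(stderr, "SIOCSIFMTU: %s\n", strerror(-ret));
            return ret ? 1 : 0;
    }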
\ - RTE_LOG_DP(level, PMD, fmt, ## args) + RTE_LOG_DP(level, PMD, fmt "\n", ## args) #define PMD_INIT_LOG(level, fmt, args...) \ rte_log(RTE_LOG_ ## level, mana_logtype_init, "%s(): " fmt "\n",\ @@ -455,8 +475,14 @@ extern int mana_logtype_init; #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +#ifdef RTE_ARCH_32 +int mana_ring_short_doorbell(void *db_page, enum gdma_queue_types queue_type, + uint32_t queue_id, uint32_t tail_incr, + uint8_t arm); +#else int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type, uint32_t queue_id, uint32_t tail, uint8_t arm); +#endif int mana_rq_ring_doorbell(struct mana_rxq *rxq); int gdma_post_work_request(struct mana_gdma_queue *queue, diff --git a/drivers/net/mana/meson.build b/drivers/net/mana/meson.build index 493f0d26d48..2d72eca5a8c 100644 --- a/drivers/net/mana/meson.build +++ b/drivers/net/mana/meson.build @@ -1,9 +1,9 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2022 Microsoft Corporation -if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64') +if not is_linux or not dpdk_conf.has('RTE_ARCH_X86') build = false - reason = 'only supported on x86_64 Linux' + reason = 'only supported on x86 Linux' subdir_done() endif diff --git a/drivers/net/mana/mr.c b/drivers/net/mana/mr.c index fec0dc961ca..b8e6ea0bbfe 100644 --- a/drivers/net/mana/mr.c +++ b/drivers/net/mana/mr.c @@ -53,7 +53,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, } DP_LOG(DEBUG, - "registering memory chunk start 0x%" PRIx64 " len %u", + "registering memory chunk start 0x%" PRIxPTR " len %u", ranges[i].start, ranges[i].len); if (rte_eal_process_type() == RTE_PROC_SECONDARY) { @@ -62,7 +62,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, ranges[i].len); if (ret) { DP_LOG(ERR, - "MR failed start 0x%" PRIx64 " len %u", + "MR failed start 0x%" PRIxPTR " len %u", ranges[i].start, ranges[i].len); return ret; } @@ -72,7 +72,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, ibv_mr = ibv_reg_mr(priv->ib_pd, (void *)ranges[i].start, ranges[i].len, IBV_ACCESS_LOCAL_WRITE); if (ibv_mr) { - DP_LOG(DEBUG, "MR lkey %u addr %p len %" PRIu64, + DP_LOG(DEBUG, "MR lkey %u addr %p len %zu", ibv_mr->lkey, ibv_mr->addr, ibv_mr->length); mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0); @@ -99,7 +99,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, return ret; } } else { - DP_LOG(ERR, "MR failed at 0x%" PRIx64 " len %u", + DP_LOG(ERR, "MR failed at 0x%" PRIxPTR " len %u", ranges[i].start, ranges[i].len); return -errno; } @@ -141,7 +141,7 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv, mr = mana_mr_btree_lookup(local_mr_btree, &idx, (uintptr_t)mbuf->buf_addr, mbuf->buf_len); if (mr) { - DP_LOG(DEBUG, "Local mr lkey %u addr 0x%" PRIx64 " len %" PRIu64, + DP_LOG(DEBUG, "Local mr lkey %u addr 0x%" PRIxPTR " len %zu", mr->lkey, mr->addr, mr->len); return mr; } @@ -162,7 +162,7 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv, } DP_LOG(DEBUG, - "Added local MR key %u addr 0x%" PRIx64 " len %" PRIu64, + "Added local MR key %u addr 0x%" PRIxPTR " len %zu", mr->lkey, mr->addr, mr->len); return mr; } @@ -266,7 +266,7 @@ mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, return &table[base]; DP_LOG(DEBUG, - "addr 0x%" PRIx64 " len %zu idx %u sum 0x%" PRIx64 " not found", + "addr 0x%" PRIxPTR " len %zu idx %u sum 0x%" PRIxPTR " not found", addr, len, *idx, addr + len); return NULL; @@ -316,7 +316,7 
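The mr.c logging changes above swap PRIx64 for PRIxPTR because the printed values are pointer-sized (uintptr_t), and on the newly supported 32-bit build those are 32 bits wide. For reference, a tiny standalone example of the matching format/type pairs:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            int local;
            uintptr_t addr = (uintptr_t)&local;  /* pointer-sized on any build */
            size_t len = sizeof(local);

            /* PRIxPTR pairs with uintptr_t the same way %zu pairs with size_t,
             * so the format stays correct on both 32-bit and 64-bit targets.
             */
            printf("addr 0x%" PRIxPTR " len %zu\n", addr, len);
            return 0;
    }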
@@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry) uint16_t shift; if (mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len)) { - DP_LOG(DEBUG, "Addr 0x%" PRIx64 " len %zu exists in btree", + DP_LOG(DEBUG, "Addr 0x%" PRIxPTR " len %zu exists in btree", entry->addr, entry->len); return 0; } @@ -340,7 +340,7 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry) bt->len++; DP_LOG(DEBUG, - "Inserted MR b-tree table %p idx %d addr 0x%" PRIx64 " len %zu", + "Inserted MR b-tree table %p idx %d addr 0x%" PRIxPTR " len %zu", table, idx, entry->addr, entry->len); return 0; diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c index 14d90858018..63042fe9b92 100644 --- a/drivers/net/mana/rx.c +++ b/drivers/net/mana/rx.c @@ -39,10 +39,18 @@ mana_rq_ring_doorbell(struct mana_rxq *rxq) /* Hardware Spec specifies that software client should set 0 for * wqe_cnt for Receive Queues. */ +#ifdef RTE_ARCH_32 + ret = mana_ring_short_doorbell(db_page, GDMA_QUEUE_RECEIVE, + rxq->gdma_rq.id, + rxq->wqe_cnt_to_short_db * + GDMA_WQE_ALIGNMENT_UNIT_SIZE, + 0); +#else ret = mana_ring_doorbell(db_page, GDMA_QUEUE_RECEIVE, rxq->gdma_rq.id, rxq->gdma_rq.head * GDMA_WQE_ALIGNMENT_UNIT_SIZE, 0); +#endif if (ret) DP_LOG(ERR, "failed to ring RX doorbell ret %d", ret); @@ -97,6 +105,9 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq) /* update queue for tracking pending packets */ desc->pkt = mbuf; desc->wqe_size_in_bu = wqe_size_in_bu; +#ifdef RTE_ARCH_32 + rxq->wqe_cnt_to_short_db += wqe_size_in_bu; +#endif rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc; } else { DP_LOG(DEBUG, "failed to post recv ret %d", ret); @@ -115,12 +126,22 @@ mana_alloc_and_post_rx_wqes(struct mana_rxq *rxq) int ret; uint32_t i; +#ifdef RTE_ARCH_32 + rxq->wqe_cnt_to_short_db = 0; +#endif for (i = 0; i < rxq->num_desc; i++) { ret = mana_alloc_and_post_rx_wqe(rxq); if (ret) { DP_LOG(ERR, "failed to post RX ret = %d", ret); return ret; } + +#ifdef RTE_ARCH_32 + if (rxq->wqe_cnt_to_short_db > RX_WQE_SHORT_DB_THRESHOLD) { + mana_rq_ring_doorbell(rxq); + rxq->wqe_cnt_to_short_db = 0; + } +#endif } mana_rq_ring_doorbell(rxq); @@ -134,6 +155,10 @@ mana_stop_rx_queues(struct rte_eth_dev *dev) struct mana_priv *priv = dev->data->dev_private; int ret, i; + for (i = 0; i < priv->num_queues; i++) + if (dev->data->rx_queue_state[i] == RTE_ETH_QUEUE_STATE_STOPPED) + return -EINVAL; + if (priv->rwq_qp) { ret = ibv_destroy_qp(priv->rwq_qp); if (ret) @@ -190,7 +215,10 @@ mana_stop_rx_queues(struct rte_eth_dev *dev) memset(&rxq->gdma_rq, 0, sizeof(rxq->gdma_rq)); memset(&rxq->gdma_cq, 0, sizeof(rxq->gdma_cq)); + + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } + return 0; } @@ -202,6 +230,11 @@ mana_start_rx_queues(struct rte_eth_dev *dev) struct ibv_wq *ind_tbl[priv->num_queues]; DRV_LOG(INFO, "start rx queues"); + + for (i = 0; i < priv->num_queues; i++) + if (dev->data->rx_queue_state[i] == RTE_ETH_QUEUE_STATE_STARTED) + return -EINVAL; + for (i = 0; i < priv->num_queues; i++) { struct mana_rxq *rxq = dev->data->rx_queues[i]; struct ibv_wq_init_attr wq_attr = {}; @@ -376,6 +409,9 @@ mana_start_rx_queues(struct rte_eth_dev *dev) goto fail; } + for (i = 0; i < priv->num_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; fail: @@ -397,6 +433,10 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) uint32_t i; int polled = 0; +#ifdef RTE_ARCH_32 + rxq->wqe_cnt_to_short_db = 0; +#endif + repoll: /* Polling on new 
completions if we have no backlog */ if (rxq->comp_buf_idx == rxq->comp_buf_len) { @@ -505,6 +545,16 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) wqe_posted++; if (pkt_received == pkts_n) break; + +#ifdef RTE_ARCH_32 + /* Ring short doorbell if approaching the wqe increment + * limit. + */ + if (rxq->wqe_cnt_to_short_db > RX_WQE_SHORT_DB_THRESHOLD) { + mana_rq_ring_doorbell(rxq); + rxq->wqe_cnt_to_short_db = 0; + } +#endif } rxq->backlog_idx = pkt_idx; @@ -525,6 +575,15 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) return pkt_received; } +#ifdef RTE_ARCH_32 +static int +mana_arm_cq(struct mana_rxq *rxq __rte_unused, uint8_t arm __rte_unused) +{ + DP_LOG(ERR, "Do not support in 32 bit"); + + return -ENODEV; +} +#else static int mana_arm_cq(struct mana_rxq *rxq, uint8_t arm) { @@ -538,6 +597,7 @@ mana_arm_cq(struct mana_rxq *rxq, uint8_t arm) return mana_ring_doorbell(priv->db_page, GDMA_QUEUE_COMPLETION, rxq->gdma_cq.id, head, arm); } +#endif int mana_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c index 11ba2ee1ac5..c50385d9fe6 100644 --- a/drivers/net/mana/tx.c +++ b/drivers/net/mana/tx.c @@ -15,6 +15,10 @@ mana_stop_tx_queues(struct rte_eth_dev *dev) struct mana_priv *priv = dev->data->dev_private; int i, ret; + for (i = 0; i < priv->num_queues; i++) + if (dev->data->tx_queue_state[i] == RTE_ETH_QUEUE_STATE_STOPPED) + return -EINVAL; + for (i = 0; i < priv->num_queues; i++) { struct mana_txq *txq = dev->data->tx_queues[i]; @@ -51,6 +55,8 @@ mana_stop_tx_queues(struct rte_eth_dev *dev) memset(&txq->gdma_sq, 0, sizeof(txq->gdma_sq)); memset(&txq->gdma_cq, 0, sizeof(txq->gdma_cq)); + + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } return 0; @@ -63,6 +69,11 @@ mana_start_tx_queues(struct rte_eth_dev *dev) int ret, i; /* start TX queues */ + + for (i = 0; i < priv->num_queues; i++) + if (dev->data->tx_queue_state[i] == RTE_ETH_QUEUE_STATE_STARTED) + return -EINVAL; + for (i = 0; i < priv->num_queues; i++) { struct mana_txq *txq; struct ibv_qp_init_attr qp_attr = { 0 }; @@ -142,6 +153,8 @@ mana_start_tx_queues(struct rte_eth_dev *dev) txq->gdma_cq.id, txq->gdma_cq.buffer, txq->gdma_cq.count, txq->gdma_cq.size, txq->gdma_cq.head); + + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; } return 0; @@ -176,6 +189,9 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) void *db_page; uint16_t pkt_sent = 0; uint32_t num_comp, i; +#ifdef RTE_ARCH_32 + uint32_t wqe_count = 0; +#endif /* Process send completions from GDMA */ num_comp = gdma_poll_completion_queue(&txq->gdma_cq, @@ -418,6 +434,20 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) DP_LOG(DEBUG, "nb_pkts %u pkt[%d] sent", nb_pkts, pkt_idx); +#ifdef RTE_ARCH_32 + wqe_count += wqe_size_in_bu; + if (wqe_count > TX_WQE_SHORT_DB_THRESHOLD) { + /* wqe_count approaching to short doorbell + * increment limit. Stop processing further + * more packets and just ring short + * doorbell. 
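The short-doorbell thresholds consulted above exist because the doorbell's 16-bit tail increment can report at most 0x10000 bytes of work, so the datapath rings early enough that one more worst-case WQE still fits before the next doorbell. The sketch below, not part of the patch, just reproduces that arithmetic; the 32-byte unit is an assumed stand-in for GDMA_WQE_ALIGNMENT_UNIT_SIZE, whose real value is defined elsewhere in the driver.

    #include <stdio.h>

    #define WQE_UNIT          32                    /* assumed alignment unit, bytes */
    #define MAX_TX_WQE_SIZE   512                   /* from the patch */
    #define SHORT_DB_MAX_WQE  (0x10000 / WQE_UNIT)  /* 16-bit byte increment limit */
    #define TX_SHORT_DB_THRESHOLD \
            (SHORT_DB_MAX_WQE - (MAX_TX_WQE_SIZE / WQE_UNIT))

    int
    main(void)
    {
            /* With a 32-byte unit: 2048 units fit in one doorbell, and the Tx
             * path stops 16 units early so one maximum-size WQE still fits.
             */
            printf("max units per doorbell: %d\n", SHORT_DB_MAX_WQE);
            printf("tx threshold (units):   %d\n", TX_SHORT_DB_THRESHOLD);
            printf("bytes at threshold:     %d\n", TX_SHORT_DB_THRESHOLD * WQE_UNIT);
            return 0;
    }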
+ */ + DP_LOG(DEBUG, "wqe_count %u reaching limit, " + "pkt_sent %d", + wqe_count, pkt_sent); + break; + } +#endif } else { DP_LOG(DEBUG, "pkt[%d] failed to post send ret %d", pkt_idx, ret); @@ -436,11 +466,19 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) } if (pkt_sent) { +#ifdef RTE_ARCH_32 + ret = mana_ring_short_doorbell(db_page, GDMA_QUEUE_SEND, + txq->gdma_sq.id, + wqe_count * + GDMA_WQE_ALIGNMENT_UNIT_SIZE, + 0); +#else ret = mana_ring_doorbell(db_page, GDMA_QUEUE_SEND, txq->gdma_sq.id, txq->gdma_sq.head * GDMA_WQE_ALIGNMENT_UNIT_SIZE, 0); +#endif if (ret) DP_LOG(ERR, "mana_ring_doorbell failed ret %d", ret); } diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c index 6a8ff5b4eb6..7cc8c0da91e 100644 --- a/drivers/net/memif/rte_eth_memif.c +++ b/drivers/net/memif/rte_eth_memif.c @@ -356,7 +356,7 @@ eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) rx_pkts = 0; pkts = nb_pkts < MAX_PKT_BURST ? nb_pkts : MAX_PKT_BURST; while (n_slots && rx_pkts < pkts) { - mbuf_head = mbufs[n_rx_pkts]; + mbuf_head = mbufs[rx_pkts]; mbuf = mbuf_head; next_slot1: @@ -1358,6 +1358,7 @@ memif_dev_start(struct rte_eth_dev *dev) { struct pmd_internals *pmd = dev->data->dev_private; int ret = 0; + uint16_t i; switch (pmd->role) { case MEMIF_ROLE_CLIENT: @@ -1372,13 +1373,28 @@ memif_dev_start(struct rte_eth_dev *dev) break; } + if (ret == 0) { + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + return ret; } static int memif_dev_stop(struct rte_eth_dev *dev) { + uint16_t i; + memif_disconnect(dev); + + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c index a54016f4a23..1389b606ccb 100644 --- a/drivers/net/mlx4/mlx4.c +++ b/drivers/net/mlx4/mlx4.c @@ -292,6 +292,7 @@ mlx4_dev_start(struct rte_eth_dev *dev) { struct mlx4_priv *priv = dev->data->dev_private; struct rte_flow_error error; + uint16_t i; int ret; if (priv->started) @@ -327,6 +328,12 @@ mlx4_dev_start(struct rte_eth_dev *dev) dev->rx_pkt_burst = mlx4_rx_burst; /* Enable datapath on secondary process. 
*/ mlx4_mp_req_start_rxtx(dev); + + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; err: mlx4_dev_stop(dev); @@ -345,6 +352,7 @@ static int mlx4_dev_stop(struct rte_eth_dev *dev) { struct mlx4_priv *priv = dev->data->dev_private; + uint16_t i; if (!priv->started) return 0; @@ -359,6 +367,11 @@ mlx4_dev_stop(struct rte_eth_dev *dev) mlx4_rxq_intr_disable(priv); mlx4_rss_deinit(priv); + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/mlx5/hws/mlx5dr.h b/drivers/net/mlx5/hws/mlx5dr.h index ec2230d1361..ea8bf683f37 100644 --- a/drivers/net/mlx5/hws/mlx5dr.h +++ b/drivers/net/mlx5/hws/mlx5dr.h @@ -139,6 +139,8 @@ struct mlx5dr_matcher_attr { uint8_t num_log; } rule; }; + /* Optional AT attach configuration - Max number of additional AT */ + uint8_t max_num_of_at_attach; }; struct mlx5dr_rule_attr { @@ -156,8 +158,21 @@ struct mlx5dr_devx_obj { uint32_t id; }; -/* In actions that take offset, the offset is unique, and the user should not - * reuse the same index because data changing is not atomic. +struct mlx5dr_action_reformat_header { + size_t sz; + void *data; +}; + +struct mlx5dr_action_mh_pattern { + /* Byte size of modify actions provided by "data" */ + size_t sz; + /* PRM format modify actions pattern */ + __be64 *data; +}; + +/* In actions that take offset, the offset is unique, pointing to a single + * resource and the user should not reuse the same index because data changing + * is not atomic. */ struct mlx5dr_rule_action { struct mlx5dr_action *action; @@ -172,11 +187,13 @@ struct mlx5dr_rule_action { struct { uint32_t offset; + uint8_t pattern_idx; uint8_t *data; } modify_header; struct { uint32_t offset; + uint8_t hdr_idx; uint8_t *data; } reformat; @@ -237,6 +254,18 @@ mlx5dr_table_create(struct mlx5dr_context *ctx, */ int mlx5dr_table_destroy(struct mlx5dr_table *tbl); +/* Set default miss table for mlx5dr_table by using another mlx5dr_table + * Traffic which all table matchers miss will be forwarded to miss table. + * + * @param[in] tbl + * source mlx5dr table + * @param[in] miss_tbl + * target (miss) mlx5dr table, or NULL to remove current miss table + * @return zero on success non zero otherwise. + */ +int mlx5dr_table_set_default_miss(struct mlx5dr_table *tbl, + struct mlx5dr_table *miss_tbl); + /* Create new match template based on items mask, the match template * will be used for matcher creation. * @@ -313,6 +342,17 @@ mlx5dr_matcher_create(struct mlx5dr_table *table, */ int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher); +/* Attach new action template to direct rule matcher. + * + * @param[in] matcher + * Matcher to attach at to. + * @param[in] at + * Action template to be attached to the matcher. + * @return zero on success non zero otherwise. + */ +int mlx5dr_matcher_attach_at(struct mlx5dr_matcher *matcher, + struct mlx5dr_action_template *at); + /* Get the size of the rule handle (mlx5dr_rule) to be used on rule creation. * * @return size in bytes of rule handle struct. @@ -481,12 +521,12 @@ mlx5dr_action_create_counter(struct mlx5dr_context *ctx, * The context in which the new action will be created. 
* @param[in] reformat_type * Type of reformat prefixed with MLX5DR_ACTION_TYP_REFORMAT. - * @param[in] data_sz - * Size in bytes of data. - * @param[in] inline_data - * Header data array in case of inline action. + * @param[in] num_of_hdrs + * Number of provided headers in "hdrs" array. + * @param[in] hdrs + * Headers array containing header information. * @param[in] log_bulk_size - * Number of unique values used with this pattern. + * Number of unique values used with this reformat. * @param[in] flags * Action creation flags. (enum mlx5dr_action_flags) * @return pointer to mlx5dr_action on success NULL otherwise. @@ -494,8 +534,8 @@ mlx5dr_action_create_counter(struct mlx5dr_context *ctx, struct mlx5dr_action * mlx5dr_action_create_reformat(struct mlx5dr_context *ctx, enum mlx5dr_action_type reformat_type, - size_t data_sz, - void *inline_data, + uint8_t num_of_hdrs, + struct mlx5dr_action_reformat_header *hdrs, uint32_t log_bulk_size, uint32_t flags); @@ -503,10 +543,10 @@ mlx5dr_action_create_reformat(struct mlx5dr_context *ctx, * * @param[in] ctx * The context in which the new action will be created. - * @param[in] pattern_sz - * Byte size of the pattern array. - * @param[in] pattern - * PRM format modify pattern action array. + * @param[in] num_of_patterns + * Number of provided patterns in "patterns" array. + * @param[in] patterns + * Patterns array containing pattern information. * @param[in] log_bulk_size * Number of unique values used with this pattern. * @param[in] flags @@ -515,8 +555,8 @@ mlx5dr_action_create_reformat(struct mlx5dr_context *ctx, */ struct mlx5dr_action * mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx, - size_t pattern_sz, - __be64 pattern[], + uint8_t num_of_patterns, + struct mlx5dr_action_mh_pattern *patterns, uint32_t log_bulk_size, uint32_t flags); diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c index 920099ba5b1..ea9fc23732f 100644 --- a/drivers/net/mlx5/hws/mlx5dr_action.c +++ b/drivers/net/mlx5/hws/mlx5dr_action.c @@ -481,6 +481,8 @@ static uint32_t mlx5dr_action_get_mh_stc_type(__be64 pattern) return MLX5_IFC_STC_ACTION_TYPE_ADD; case MLX5_MODIFICATION_TYPE_COPY: return MLX5_IFC_STC_ACTION_TYPE_COPY; + case MLX5_MODIFICATION_TYPE_ADD_FIELD: + return MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD; default: assert(false); DR_LOG(ERR, "Unsupported action type: 0x%x", action_type); @@ -529,7 +531,7 @@ static void mlx5dr_action_fill_stc_attr(struct mlx5dr_action *action, } else { attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST; attr->modify_header.arg_id = action->modify_header.arg_obj->id; - attr->modify_header.pattern_id = action->modify_header.pattern_obj->id; + attr->modify_header.pattern_id = action->modify_header.pat_obj->id; } break; case MLX5DR_ACTION_TYP_TBL: @@ -705,11 +707,13 @@ mlx5dr_action_is_hws_flags(uint32_t flags) } static struct mlx5dr_action * -mlx5dr_action_create_generic(struct mlx5dr_context *ctx, - uint32_t flags, - enum mlx5dr_action_type action_type) +mlx5dr_action_create_generic_bulk(struct mlx5dr_context *ctx, + uint32_t flags, + enum mlx5dr_action_type action_type, + uint8_t bulk_sz) { struct mlx5dr_action *action; + int i; if (!mlx5dr_action_is_root_flags(flags) && !mlx5dr_action_is_hws_flags(flags)) { @@ -725,20 +729,30 @@ mlx5dr_action_create_generic(struct mlx5dr_context *ctx, return NULL; } - action = simple_calloc(1, sizeof(*action)); + action = simple_calloc(bulk_sz, sizeof(*action)); if (!action) { DR_LOG(ERR, "Failed to allocate memory for action [%d]", 
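Because the reformat and modify-header constructors above now take arrays, a caller builds a small header (or pattern) array and receives one handle covering all entries; the hdr_idx and pattern_idx fields added to mlx5dr_rule_action then select which entry a given rule uses. The sketch below only illustrates the new call shape and is not code from the patch: the context, the flags (which, per the checks added later in this diff, must be non-shared HWS flags when more than one header is passed) and the header buffers are assumed to come from the caller.

    #include <stddef.h>
    #include <stdint.h>

    #include "mlx5dr.h"  /* driver-internal header shown in this patch */

    /* Create one bulk L2-to-tunnel-L2 encap action from two prebuilt encap
     * headers; hdr0/hdr1 and their sizes are caller-provided placeholders.
     */
    struct mlx5dr_action *
    create_two_encaps(struct mlx5dr_context *ctx, uint32_t flags,
                      void *hdr0, size_t sz0, void *hdr1, size_t sz1)
    {
            struct mlx5dr_action_reformat_header hdrs[2] = {
                    { .sz = sz0, .data = hdr0 },
                    { .sz = sz1, .data = hdr1 },
            };

            /* The headers share a single argument object internally; rules
             * later choose header 0 or 1 via rule_action.reformat.hdr_idx.
             */
            return mlx5dr_action_create_reformat(ctx,
                                                 MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
                                                 2, hdrs,
                                                 0 /* log_bulk_size */,
                                                 flags);
    }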
action_type); rte_errno = ENOMEM; return NULL; } - action->ctx = ctx; - action->flags = flags; - action->type = action_type; + for (i = 0; i < bulk_sz; i++) { + action[i].ctx = ctx; + action[i].flags = flags; + action[i].type = action_type; + } return action; } +static struct mlx5dr_action * +mlx5dr_action_create_generic(struct mlx5dr_context *ctx, + uint32_t flags, + enum mlx5dr_action_type action_type) +{ + return mlx5dr_action_create_generic_bulk(ctx, flags, action_type, 1); +} + struct mlx5dr_action * mlx5dr_action_create_dest_table(struct mlx5dr_context *ctx, struct mlx5dr_table *tbl, @@ -1141,7 +1155,7 @@ mlx5dr_action_create_pop_vlan(struct mlx5dr_context *ctx, uint32_t flags) return NULL; } -static void +static int mlx5dr_action_conv_reformat_to_verbs(uint32_t action_type, uint32_t *verb_reformat_type) { @@ -1149,19 +1163,23 @@ mlx5dr_action_conv_reformat_to_verbs(uint32_t action_type, case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: *verb_reformat_type = MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2; - break; + return 0; case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: *verb_reformat_type = MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL; - break; + return 0; case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: *verb_reformat_type = MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2; - break; + return 0; case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: *verb_reformat_type = MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL; - break; + return 0; + default: + DR_LOG(ERR, "Invalid root reformat action type"); + rte_errno = EINVAL; + return rte_errno; } } @@ -1199,7 +1217,9 @@ mlx5dr_action_create_reformat_root(struct mlx5dr_action *action, if (ret) return rte_errno; - mlx5dr_action_conv_reformat_to_verbs(action->type, &verb_reformat_type); + ret = mlx5dr_action_conv_reformat_to_verbs(action->type, &verb_reformat_type); + if (ret) + return rte_errno; /* Create the reformat type for root table */ ibv_ctx = mlx5dr_context_get_local_ibv(action->ctx); @@ -1210,6 +1230,7 @@ mlx5dr_action_create_reformat_root(struct mlx5dr_action *action, verb_reformat_type, ft_type); if (!action->flow_action) { + DR_LOG(ERR, "Failed to create dv_create_flow reformat"); rte_errno = errno; return rte_errno; } @@ -1217,132 +1238,84 @@ mlx5dr_action_create_reformat_root(struct mlx5dr_action *action, return 0; } -static int mlx5dr_action_handle_reformat_args(struct mlx5dr_context *ctx, - size_t data_sz, - void *data, - uint32_t bulk_size, - struct mlx5dr_action *action) -{ - uint32_t args_log_size; - int ret; - - if (data_sz % 2 != 0) { - DR_LOG(ERR, "Data size should be multiply of 2"); - rte_errno = EINVAL; - return rte_errno; - } - action->reformat.header_size = data_sz; - - args_log_size = mlx5dr_arg_data_size_to_arg_log_size(data_sz); - if (args_log_size >= MLX5DR_ARG_CHUNK_SIZE_MAX) { - DR_LOG(ERR, "Data size is bigger than supported"); - rte_errno = EINVAL; - return rte_errno; +static int +mlx5dr_action_handle_l2_to_tunnel_l2(struct mlx5dr_action *action, + uint8_t num_of_hdrs, + struct mlx5dr_action_reformat_header *hdrs, + uint32_t log_bulk_sz) +{ + struct mlx5dr_devx_obj *arg_obj; + size_t max_sz = 0; + int ret, i; + + for (i = 0; i < num_of_hdrs; i++) { + if (hdrs[i].sz % 2 != 0) { + DR_LOG(ERR, "Header data size should be multiply of 2"); + rte_errno = EINVAL; + return rte_errno; + } + max_sz = RTE_MAX(hdrs[i].sz, max_sz); } - args_log_size += bulk_size; - if (!mlx5dr_arg_is_valid_arg_request_size(ctx, args_log_size)) { - DR_LOG(ERR, "Arg size %d does not fit FW requests", - 
args_log_size); - rte_errno = EINVAL; + /* Allocate single shared arg object for all headers */ + arg_obj = mlx5dr_arg_create(action->ctx, + hdrs->data, + max_sz, + log_bulk_sz, + action->flags & MLX5DR_ACTION_FLAG_SHARED); + if (!arg_obj) return rte_errno; - } - action->reformat.arg_obj = mlx5dr_cmd_arg_create(ctx->ibv_ctx, - args_log_size, - ctx->pd_num); - if (!action->reformat.arg_obj) { - DR_LOG(ERR, "Failed to create arg for reformat"); - return rte_errno; - } + for (i = 0; i < num_of_hdrs; i++) { + action[i].reformat.arg_obj = arg_obj; + action[i].reformat.header_size = hdrs[i].sz; + action[i].reformat.num_of_hdrs = num_of_hdrs; + action[i].reformat.max_hdr_sz = max_sz; - /* When INLINE need to write the arg data */ - if (action->flags & MLX5DR_ACTION_FLAG_SHARED) { - ret = mlx5dr_arg_write_inline_arg_data(ctx, - action->reformat.arg_obj->id, - data, - data_sz); + ret = mlx5dr_action_create_stcs(&action[i], NULL); if (ret) { - DR_LOG(ERR, "Failed to write inline arg for reformat"); - goto free_arg; + DR_LOG(ERR, "Failed to create stc for reformat"); + goto free_stc; } } return 0; -free_arg: - mlx5dr_cmd_destroy_obj(action->reformat.arg_obj); - return ret; -} - -static int mlx5dr_action_handle_l2_to_tunnel_l2(struct mlx5dr_context *ctx, - size_t data_sz, - void *data, - uint32_t bulk_size, - struct mlx5dr_action *action) -{ - int ret; +free_stc: + while (i--) + mlx5dr_action_destroy_stcs(&action[i]); - ret = mlx5dr_action_handle_reformat_args(ctx, data_sz, data, bulk_size, - action); - if (ret) { - DR_LOG(ERR, "Failed to create args for reformat"); - return ret; - } - - ret = mlx5dr_action_create_stcs(action, NULL); - if (ret) { - DR_LOG(ERR, "Failed to create stc for reformat"); - goto free_arg; - } - - return 0; - -free_arg: - mlx5dr_cmd_destroy_obj(action->reformat.arg_obj); + mlx5dr_cmd_destroy_obj(arg_obj); return ret; } -static int mlx5dr_action_get_shared_stc_offset(struct mlx5dr_context_common_res *common_res, - enum mlx5dr_context_shared_stc_type stc_type) -{ - return common_res->shared_stc[stc_type]->remove_header.offset; -} - -static int mlx5dr_action_handle_l2_to_tunnel_l3(struct mlx5dr_context *ctx, - size_t data_sz, - void *data, - uint32_t bulk_size, - struct mlx5dr_action *action) +static int +mlx5dr_action_handle_l2_to_tunnel_l3(struct mlx5dr_action *action, + uint8_t num_of_hdrs, + struct mlx5dr_action_reformat_header *hdrs, + uint32_t log_bulk_sz) { int ret; - ret = mlx5dr_action_handle_reformat_args(ctx, data_sz, data, bulk_size, - action); - if (ret) { - DR_LOG(ERR, "Failed to create args for reformat"); - return ret; - } - /* The action is remove-l2-header + insert-l3-header */ ret = mlx5dr_action_get_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DECAP); if (ret) { DR_LOG(ERR, "Failed to create remove stc for reformat"); - goto free_arg; + return ret; } - ret = mlx5dr_action_create_stcs(action, NULL); - if (ret) { - DR_LOG(ERR, "Failed to create insert stc for reformat"); - goto down_shared; - } + /* Reuse the insert with pointer for the L2L3 header */ + ret = mlx5dr_action_handle_l2_to_tunnel_l2(action, + num_of_hdrs, + hdrs, + log_bulk_sz); + if (ret) + goto put_shared_stc; return 0; -down_shared: +put_shared_stc: mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DECAP); -free_arg: - mlx5dr_cmd_destroy_obj(action->reformat.arg_obj); return ret; } @@ -1393,67 +1366,81 @@ static void mlx5dr_action_prepare_decap_l3_actions(size_t data_sz, } static int -mlx5dr_action_handle_tunnel_l3_to_l2(struct mlx5dr_context *ctx, - size_t data_sz, - void *data, - 
uint32_t bulk_size, - struct mlx5dr_action *action) +mlx5dr_action_handle_tunnel_l3_to_l2(struct mlx5dr_action *action, + uint8_t num_of_hdrs, + struct mlx5dr_action_reformat_header *hdrs, + uint32_t log_bulk_sz) { uint8_t mh_data[MLX5DR_ACTION_REFORMAT_DATA_SIZE] = {0}; + struct mlx5dr_devx_obj *arg_obj, *pat_obj; + struct mlx5dr_context *ctx = action->ctx; int num_of_actions; int mh_data_size; - int ret; + int ret, i; - if (data_sz != MLX5DR_ACTION_HDR_LEN_L2 && - data_sz != MLX5DR_ACTION_HDR_LEN_L2_W_VLAN) { - DR_LOG(ERR, "Data size is not supported for decap-l3"); - rte_errno = EINVAL; - return rte_errno; + for (i = 0; i < num_of_hdrs; i++) { + if (hdrs[i].sz != MLX5DR_ACTION_HDR_LEN_L2 && + hdrs[i].sz != MLX5DR_ACTION_HDR_LEN_L2_W_VLAN) { + DR_LOG(ERR, "Data size is not supported for decap-l3"); + rte_errno = EINVAL; + return rte_errno; + } } - mlx5dr_action_prepare_decap_l3_actions(data_sz, mh_data, &num_of_actions); + /* Create a full modify header action list in case shared */ + mlx5dr_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions); + mlx5dr_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions); - mh_data_size = num_of_actions * MLX5DR_MODIFY_ACTION_SIZE; + /* All DecapL3 cases require the same max arg size */ + arg_obj = mlx5dr_arg_create_modify_header_arg(ctx, + (__be64 *)mh_data, + num_of_actions, + log_bulk_sz, + action->flags & MLX5DR_ACTION_FLAG_SHARED); + if (!arg_obj) + return rte_errno; - ret = mlx5dr_pat_arg_create_modify_header(ctx, action, mh_data_size, - (__be64 *)mh_data, bulk_size); - if (ret) { - DR_LOG(ERR, "Failed allocating modify-header for decap-l3"); - return ret; - } + for (i = 0; i < num_of_hdrs; i++) { + memset(mh_data, 0, MLX5DR_ACTION_REFORMAT_DATA_SIZE); + mlx5dr_action_prepare_decap_l3_actions(hdrs[i].sz, mh_data, &num_of_actions); + mh_data_size = num_of_actions * MLX5DR_MODIFY_ACTION_SIZE; - ret = mlx5dr_action_create_stcs(action, NULL); - if (ret) - goto free_mh_obj; - - if (action->flags & MLX5DR_ACTION_FLAG_SHARED) { - mlx5dr_action_prepare_decap_l3_data(data, mh_data, num_of_actions); - ret = mlx5dr_arg_write_inline_arg_data(ctx, - action->modify_header.arg_obj->id, - (uint8_t *)mh_data, - num_of_actions * - MLX5DR_MODIFY_ACTION_SIZE); + pat_obj = mlx5dr_pat_get_pattern(ctx, (__be64 *)mh_data, mh_data_size); + if (!pat_obj) { + DR_LOG(ERR, "Failed to allocate pattern for DecapL3"); + goto free_stc_and_pat; + } + + action[i].modify_header.max_num_of_actions = num_of_actions; + action[i].modify_header.num_of_actions = num_of_actions; + action[i].modify_header.arg_obj = arg_obj; + action[i].modify_header.pat_obj = pat_obj; + + ret = mlx5dr_action_create_stcs(&action[i], NULL); if (ret) { - DR_LOG(ERR, "Failed writing INLINE arg decap_l3"); - goto clean_stc; + mlx5dr_pat_put_pattern(ctx, pat_obj); + goto free_stc_and_pat; } } return 0; -clean_stc: - mlx5dr_action_destroy_stcs(action); -free_mh_obj: - mlx5dr_pat_arg_destroy_modify_header(ctx, action); - return ret; + +free_stc_and_pat: + while (i--) { + mlx5dr_action_destroy_stcs(&action[i]); + mlx5dr_pat_put_pattern(ctx, action[i].modify_header.pat_obj); + } + + mlx5dr_cmd_destroy_obj(arg_obj); + return 0; } static int -mlx5dr_action_create_reformat_hws(struct mlx5dr_context *ctx, - size_t data_sz, - void *data, - uint32_t bulk_size, - struct mlx5dr_action *action) +mlx5dr_action_create_reformat_hws(struct mlx5dr_action *action, + uint8_t num_of_hdrs, + struct mlx5dr_action_reformat_header *hdrs, + uint32_t bulk_size) { int ret; @@ -1462,18 +1449,17 @@ 
mlx5dr_action_create_reformat_hws(struct mlx5dr_context *ctx, ret = mlx5dr_action_create_stcs(action, NULL); break; case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: - ret = mlx5dr_action_handle_l2_to_tunnel_l2(ctx, data_sz, data, bulk_size, action); + ret = mlx5dr_action_handle_l2_to_tunnel_l2(action, num_of_hdrs, hdrs, bulk_size); break; case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: - ret = mlx5dr_action_handle_l2_to_tunnel_l3(ctx, data_sz, data, bulk_size, action); + ret = mlx5dr_action_handle_l2_to_tunnel_l3(action, num_of_hdrs, hdrs, bulk_size); break; case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: - ret = mlx5dr_action_handle_tunnel_l3_to_l2(ctx, data_sz, data, bulk_size, action); + ret = mlx5dr_action_handle_tunnel_l3_to_l2(action, num_of_hdrs, hdrs, bulk_size); break; - default: - assert(false); - rte_errno = ENOTSUP; + DR_LOG(ERR, "Invalid HWS reformat action type"); + rte_errno = EINVAL; return rte_errno; } @@ -1483,15 +1469,20 @@ mlx5dr_action_create_reformat_hws(struct mlx5dr_context *ctx, struct mlx5dr_action * mlx5dr_action_create_reformat(struct mlx5dr_context *ctx, enum mlx5dr_action_type reformat_type, - size_t data_sz, - void *inline_data, + uint8_t num_of_hdrs, + struct mlx5dr_action_reformat_header *hdrs, uint32_t log_bulk_size, uint32_t flags) { struct mlx5dr_action *action; int ret; - action = mlx5dr_action_create_generic(ctx, flags, reformat_type); + if (!num_of_hdrs) { + DR_LOG(ERR, "Reformat num_of_hdrs cannot be zero"); + return NULL; + } + + action = mlx5dr_action_create_generic_bulk(ctx, flags, reformat_type, num_of_hdrs); if (!action) return NULL; @@ -1502,24 +1493,27 @@ mlx5dr_action_create_reformat(struct mlx5dr_context *ctx, goto free_action; } - ret = mlx5dr_action_create_reformat_root(action, data_sz, inline_data); - if (ret) + ret = mlx5dr_action_create_reformat_root(action, + hdrs ? hdrs->sz : 0, + hdrs ? 
hdrs->data : NULL); + if (ret) { + DR_LOG(ERR, "Failed to create root reformat action"); goto free_action; + } return action; } if (!mlx5dr_action_is_hws_flags(flags) || - ((flags & MLX5DR_ACTION_FLAG_SHARED) && log_bulk_size)) { - DR_LOG(ERR, "Reformat flags don't fit HWS (flags: %x0x)", - flags); + ((flags & MLX5DR_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1))) { + DR_LOG(ERR, "Reformat flags don't fit HWS (flags: %x0x)", flags); rte_errno = EINVAL; goto free_action; } - ret = mlx5dr_action_create_reformat_hws(ctx, data_sz, inline_data, log_bulk_size, action); + ret = mlx5dr_action_create_reformat_hws(action, num_of_hdrs, hdrs, log_bulk_size); if (ret) { - DR_LOG(ERR, "Failed to create reformat."); + DR_LOG(ERR, "Failed to create HWS reformat action"); rte_errno = EINVAL; goto free_action; } @@ -1559,17 +1553,104 @@ mlx5dr_action_create_modify_header_root(struct mlx5dr_action *action, return 0; } +static int +mlx5dr_action_create_modify_header_hws(struct mlx5dr_action *action, + uint8_t num_of_patterns, + struct mlx5dr_action_mh_pattern *pattern, + uint32_t log_bulk_size) +{ + struct mlx5dr_devx_obj *pat_obj, *arg_obj = NULL; + struct mlx5dr_context *ctx = action->ctx; + uint16_t max_mh_actions = 0; + int i, ret; + + /* Calculate maximum number of mh actions for shared arg allocation */ + for (i = 0; i < num_of_patterns; i++) + max_mh_actions = RTE_MAX(max_mh_actions, pattern[i].sz / MLX5DR_MODIFY_ACTION_SIZE); + + /* Allocate single shared arg for all patterns based on the max size */ + if (max_mh_actions > 1) { + arg_obj = mlx5dr_arg_create_modify_header_arg(ctx, + pattern->data, + max_mh_actions, + log_bulk_size, + action->flags & + MLX5DR_ACTION_FLAG_SHARED); + if (!arg_obj) + return rte_errno; + } + + for (i = 0; i < num_of_patterns; i++) { + if (!mlx5dr_pat_verify_actions(pattern[i].data, pattern[i].sz)) { + DR_LOG(ERR, "Fail to verify pattern modify actions"); + rte_errno = EINVAL; + goto free_stc_and_pat; + } + + action[i].modify_header.num_of_patterns = num_of_patterns; + action[i].modify_header.max_num_of_actions = max_mh_actions; + action[i].modify_header.num_of_actions = pattern[i].sz / MLX5DR_MODIFY_ACTION_SIZE; + + if (action[i].modify_header.num_of_actions == 1) { + pat_obj = NULL; + /* Optimize single modify action to be used inline */ + action[i].modify_header.single_action = pattern[i].data[0]; + action[i].modify_header.single_action_type = + MLX5_GET(set_action_in, pattern[i].data, action_type); + } else { + /* Multiple modify actions require a pattern */ + pat_obj = mlx5dr_pat_get_pattern(ctx, pattern[i].data, pattern[i].sz); + if (!pat_obj) { + DR_LOG(ERR, "Failed to allocate pattern for modify header"); + goto free_stc_and_pat; + } + + action[i].modify_header.arg_obj = arg_obj; + action[i].modify_header.pat_obj = pat_obj; + } + /* Allocate STC for each action representing a header */ + ret = mlx5dr_action_create_stcs(&action[i], NULL); + if (ret) { + if (pat_obj) + mlx5dr_pat_put_pattern(ctx, pat_obj); + goto free_stc_and_pat; + } + } + + return 0; + +free_stc_and_pat: + while (i--) { + mlx5dr_action_destroy_stcs(&action[i]); + if (action[i].modify_header.pat_obj) + mlx5dr_pat_put_pattern(ctx, action[i].modify_header.pat_obj); + } + + if (arg_obj) + mlx5dr_cmd_destroy_obj(arg_obj); + + return rte_errno; +} + struct mlx5dr_action * mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx, - size_t pattern_sz, - __be64 pattern[], + uint8_t num_of_patterns, + struct mlx5dr_action_mh_pattern *patterns, uint32_t log_bulk_size, uint32_t flags) { struct 
mlx5dr_action *action; int ret; - action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_MODIFY_HDR); + if (!num_of_patterns) { + DR_LOG(ERR, "Invalid number of patterns"); + rte_errno = ENOTSUP; + return NULL; + } + + action = mlx5dr_action_create_generic_bulk(ctx, flags, + MLX5DR_ACTION_TYP_MODIFY_HDR, + num_of_patterns); if (!action) return NULL; @@ -1579,52 +1660,37 @@ mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx, rte_errno = ENOTSUP; goto free_action; } - ret = mlx5dr_action_create_modify_header_root(action, pattern_sz, pattern); + + if (num_of_patterns != 1) { + DR_LOG(ERR, "Only a single pattern supported over root"); + rte_errno = ENOTSUP; + goto free_action; + } + + ret = mlx5dr_action_create_modify_header_root(action, + patterns->sz, + patterns->data); if (ret) goto free_action; return action; } - if (!mlx5dr_action_is_hws_flags(flags) || - ((flags & MLX5DR_ACTION_FLAG_SHARED) && log_bulk_size)) { - DR_LOG(ERR, "Flags don't fit hws (flags: %x0x, log_bulk_size: %d)", - flags, log_bulk_size); + if ((flags & MLX5DR_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_patterns > 1)) { + DR_LOG(ERR, "Action cannot be shared with requested pattern or size"); rte_errno = EINVAL; goto free_action; } - if (!mlx5dr_pat_arg_verify_actions(pattern, pattern_sz / MLX5DR_MODIFY_ACTION_SIZE)) { - DR_LOG(ERR, "One of the actions is not supported"); - rte_errno = EINVAL; - goto free_action; - } - - if (pattern_sz / MLX5DR_MODIFY_ACTION_SIZE == 1) { - /* Optimize single modiy action to be used inline */ - action->modify_header.single_action = pattern[0]; - action->modify_header.num_of_actions = 1; - action->modify_header.single_action_type = - MLX5_GET(set_action_in, pattern, action_type); - } else { - /* Use multi action pattern and argument */ - ret = mlx5dr_pat_arg_create_modify_header(ctx, action, pattern_sz, - pattern, log_bulk_size); - if (ret) { - DR_LOG(ERR, "Failed allocating modify-header"); - goto free_action; - } - } - - ret = mlx5dr_action_create_stcs(action, NULL); + ret = mlx5dr_action_create_modify_header_hws(action, + num_of_patterns, + patterns, + log_bulk_size); if (ret) - goto free_mh_obj; + goto free_action; return action; -free_mh_obj: - if (action->modify_header.num_of_actions > 1) - mlx5dr_pat_arg_destroy_modify_header(ctx, action); free_action: simple_free(action); return NULL; @@ -1684,6 +1750,9 @@ mlx5dr_action_create_dest_root(struct mlx5dr_context *ctx, static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action) { + struct mlx5dr_devx_obj *obj = NULL; + uint32_t i; + switch (action->type) { case MLX5DR_ACTION_TYP_TIR: mlx5dr_action_destroy_stcs(action); @@ -1711,17 +1780,28 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action) break; case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: case MLX5DR_ACTION_TYP_MODIFY_HDR: - mlx5dr_action_destroy_stcs(action); - if (action->modify_header.num_of_actions > 1) - mlx5dr_pat_arg_destroy_modify_header(action->ctx, action); + for (i = 0; i < action->modify_header.num_of_patterns; i++) { + mlx5dr_action_destroy_stcs(&action[i]); + if (action[i].modify_header.num_of_actions > 1) { + mlx5dr_pat_put_pattern(action[i].ctx, + action[i].modify_header.pat_obj); + /* Save shared arg object if was used to free */ + if (action[i].modify_header.arg_obj) + obj = action[i].modify_header.arg_obj; + } + } + if (obj) + mlx5dr_cmd_destroy_obj(obj); break; case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: - mlx5dr_action_destroy_stcs(action); mlx5dr_action_put_shared_stc(action, 
MLX5DR_CONTEXT_SHARED_STC_DECAP); + for (i = 0; i < action->reformat.num_of_hdrs; i++) + mlx5dr_action_destroy_stcs(&action[i]); mlx5dr_cmd_destroy_obj(action->reformat.arg_obj); break; case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: - mlx5dr_action_destroy_stcs(action); + for (i = 0; i < action->reformat.num_of_hdrs; i++) + mlx5dr_action_destroy_stcs(&action[i]); mlx5dr_cmd_destroy_obj(action->reformat.arg_obj); break; } @@ -1903,6 +1983,12 @@ mlx5dr_action_prepare_decap_l3_data(uint8_t *src, uint8_t *dst, memcpy(dst, e_src, 2); } +static int mlx5dr_action_get_shared_stc_offset(struct mlx5dr_context_common_res *common_res, + enum mlx5dr_context_shared_stc_type stc_type) +{ + return common_res->shared_stc[stc_type]->remove_header.offset; +} + static struct mlx5dr_actions_wqe_setter * mlx5dr_action_setter_find_first(struct mlx5dr_actions_wqe_setter *setter, uint8_t req_flags) @@ -1945,20 +2031,24 @@ mlx5dr_action_setter_modify_header(struct mlx5dr_actions_apply_data *apply, struct mlx5dr_actions_wqe_setter *setter) { struct mlx5dr_rule_action *rule_action; + uint32_t stc_idx, arg_sz, arg_idx; struct mlx5dr_action *action; - uint32_t arg_sz, arg_idx; uint8_t *single_action; rule_action = &apply->rule_action[setter->idx_double]; - action = rule_action->action; - mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW6, setter->idx_double); + action = rule_action->action + rule_action->modify_header.pattern_idx; + + stc_idx = htobe32(action->stc[apply->tbl_type].offset); + apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] = stc_idx; apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0; apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0; if (action->modify_header.num_of_actions == 1) { if (action->modify_header.single_action_type == - MLX5_MODIFICATION_TYPE_COPY) { + MLX5_MODIFICATION_TYPE_COPY || + action->modify_header.single_action_type == + MLX5_MODIFICATION_TYPE_ADD_FIELD) { apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = 0; return; } @@ -1972,7 +2062,7 @@ mlx5dr_action_setter_modify_header(struct mlx5dr_actions_apply_data *apply, *(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data); } else { /* Argument offset multiple with number of args per these actions */ - arg_sz = mlx5dr_arg_get_arg_size(action->modify_header.num_of_actions); + arg_sz = mlx5dr_arg_get_arg_size(action->modify_header.max_num_of_actions); arg_idx = rule_action->modify_header.offset * arg_sz; apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(arg_idx); @@ -1992,26 +2082,29 @@ mlx5dr_action_setter_insert_ptr(struct mlx5dr_actions_apply_data *apply, struct mlx5dr_actions_wqe_setter *setter) { struct mlx5dr_rule_action *rule_action; - uint32_t arg_idx, arg_sz; + uint32_t stc_idx, arg_idx, arg_sz; + struct mlx5dr_action *action; rule_action = &apply->rule_action[setter->idx_double]; + action = rule_action->action + rule_action->reformat.hdr_idx; /* Argument offset multiple on args required for header size */ - arg_sz = mlx5dr_arg_data_size_to_arg_size(rule_action->action->reformat.header_size); + arg_sz = mlx5dr_arg_data_size_to_arg_size(action->reformat.max_hdr_sz); arg_idx = rule_action->reformat.offset * arg_sz; apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0; apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(arg_idx); - mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW6, setter->idx_double); + stc_idx = htobe32(action->stc[apply->tbl_type].offset); + apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] = stc_idx; apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0; - if (!(rule_action->action->flags & 
MLX5DR_ACTION_FLAG_SHARED)) { + if (!(action->flags & MLX5DR_ACTION_FLAG_SHARED)) { apply->require_dep = 1; mlx5dr_arg_write(apply->queue, NULL, - rule_action->action->reformat.arg_obj->id + arg_idx, + action->reformat.arg_obj->id + arg_idx, rule_action->reformat.data, - rule_action->action->reformat.header_size); + action->reformat.header_size); } } @@ -2020,20 +2113,21 @@ mlx5dr_action_setter_tnl_l3_to_l2(struct mlx5dr_actions_apply_data *apply, struct mlx5dr_actions_wqe_setter *setter) { struct mlx5dr_rule_action *rule_action; + uint32_t stc_idx, arg_sz, arg_idx; struct mlx5dr_action *action; - uint32_t arg_sz, arg_idx; rule_action = &apply->rule_action[setter->idx_double]; - action = rule_action->action; + action = rule_action->action + rule_action->reformat.hdr_idx; /* Argument offset multiple on args required for num of actions */ - arg_sz = mlx5dr_arg_get_arg_size(action->modify_header.num_of_actions); + arg_sz = mlx5dr_arg_get_arg_size(action->modify_header.max_num_of_actions); arg_idx = rule_action->reformat.offset * arg_sz; apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0; apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(arg_idx); - mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW6, setter->idx_double); + stc_idx = htobe32(action->stc[apply->tbl_type].offset); + apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] = stc_idx; apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0; if (!(action->flags & MLX5DR_ACTION_FLAG_SHARED)) { diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h index a85f3b01392..314e2897802 100644 --- a/drivers/net/mlx5/hws/mlx5dr_action.h +++ b/drivers/net/mlx5/hws/mlx5dr_action.h @@ -120,15 +120,19 @@ struct mlx5dr_action { struct mlx5dr_pool_chunk stc[MLX5DR_TABLE_TYPE_MAX]; union { struct { - struct mlx5dr_devx_obj *pattern_obj; + struct mlx5dr_devx_obj *pat_obj; struct mlx5dr_devx_obj *arg_obj; __be64 single_action; + uint8_t num_of_patterns; uint8_t single_action_type; - uint16_t num_of_actions; + uint8_t num_of_actions; + uint8_t max_num_of_actions; } modify_header; struct { struct mlx5dr_devx_obj *arg_obj; uint32_t header_size; + uint8_t num_of_hdrs; + uint16_t max_hdr_sz; } reformat; struct { struct mlx5dr_devx_obj *devx_obj; diff --git a/drivers/net/mlx5/hws/mlx5dr_buddy.c b/drivers/net/mlx5/hws/mlx5dr_buddy.c index cde4f54f662..394ca712176 100644 --- a/drivers/net/mlx5/hws/mlx5dr_buddy.c +++ b/drivers/net/mlx5/hws/mlx5dr_buddy.c @@ -61,7 +61,7 @@ static unsigned long bitmap_ffs(struct rte_bitmap *bmap, DR_LOG(ERR, "Failed to get slab from bitmap."); return m; } - pos = pos + __builtin_ctzll(out_slab); + pos = pos + rte_ctz64(out_slab); if (pos < n) { DR_LOG(ERR, "Unexpected bit (%d < %"PRIx64") from bitmap", pos, n); diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c index f9f220cc6ac..594c59aee36 100644 --- a/drivers/net/mlx5/hws/mlx5dr_cmd.c +++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c @@ -4,6 +4,12 @@ #include "mlx5dr_internal.h" +static uint32_t mlx5dr_cmd_get_syndrome(uint32_t *out) +{ + /* Assumption: syndrome is always the second u32 */ + return be32toh(out[1]); +} + int mlx5dr_cmd_destroy_obj(struct mlx5dr_devx_obj *devx_obj) { int ret; @@ -39,7 +45,8 @@ mlx5dr_cmd_flow_table_create(struct ibv_context *ctx, devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); if (!devx_obj->obj) { - DR_LOG(ERR, "Failed to create FT"); + DR_LOG(ERR, "Failed to create FT (syndrome: %#x)", + mlx5dr_cmd_get_syndrome(out)); simple_free(devx_obj); rte_errno = 
errno; return NULL; @@ -73,7 +80,8 @@ mlx5dr_cmd_flow_table_modify(struct mlx5dr_devx_obj *devx_obj, ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out)); if (ret) { - DR_LOG(ERR, "Failed to modify FT"); + DR_LOG(ERR, "Failed to modify FT (syndrome: %#x)", + mlx5dr_cmd_get_syndrome(out)); rte_errno = errno; } @@ -96,7 +104,8 @@ mlx5dr_cmd_flow_table_query(struct mlx5dr_devx_obj *devx_obj, ret = mlx5_glue->devx_obj_query(devx_obj->obj, in, sizeof(in), out, sizeof(out)); if (ret) { - DR_LOG(ERR, "Failed to query FT"); + DR_LOG(ERR, "Failed to query FT (syndrome: %#x)", + mlx5dr_cmd_get_syndrome(out)); rte_errno = errno; return ret; } @@ -129,7 +138,8 @@ mlx5dr_cmd_flow_group_create(struct ibv_context *ctx, devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); if (!devx_obj->obj) { - DR_LOG(ERR, "Failed to create Flow group"); + DR_LOG(ERR, "Failed to create Flow group(syndrome: %#x)", + mlx5dr_cmd_get_syndrome(out)); simple_free(devx_obj); rte_errno = errno; return NULL; @@ -182,7 +192,8 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx, devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); if (!devx_obj->obj) { - DR_LOG(ERR, "Failed to create FTE"); + DR_LOG(ERR, "Failed to create FTE (syndrome: %#x)", + mlx5dr_cmd_get_syndrome(out)); rte_errno = errno; goto free_devx; } @@ -325,7 +336,8 @@ mlx5dr_cmd_rtc_create(struct ibv_context *ctx, devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); if (!devx_obj->obj) { - DR_LOG(ERR, "Failed to create RTC"); + DR_LOG(ERR, "Failed to create RTC (syndrome: %#x)", + mlx5dr_cmd_get_syndrome(out)); simple_free(devx_obj); rte_errno = errno; return NULL; @@ -365,7 +377,8 @@ mlx5dr_cmd_stc_create(struct ibv_context *ctx, devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); if (!devx_obj->obj) { - DR_LOG(ERR, "Failed to create STC"); + DR_LOG(ERR, "Failed to create STC (syndrome: %#x)", + mlx5dr_cmd_get_syndrome(out)); simple_free(devx_obj); rte_errno = errno; return NULL; @@ -426,6 +439,7 @@ mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr, case MLX5_IFC_STC_ACTION_TYPE_COPY: case MLX5_IFC_STC_ACTION_TYPE_SET: case MLX5_IFC_STC_ACTION_TYPE_ADD: + case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD: *(__be64 *)stc_parm = stc_attr->modify_action.data; break; case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT: @@ -505,7 +519,8 @@ mlx5dr_cmd_stc_modify(struct mlx5dr_devx_obj *devx_obj, ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out)); if (ret) { - DR_LOG(ERR, "Failed to modify STC FW action_type %d", stc_attr->action_type); + DR_LOG(ERR, "Failed to modify STC FW action_type %d (syndrome: %#x)", + stc_attr->action_type, mlx5dr_cmd_get_syndrome(out)); rte_errno = errno; } @@ -542,7 +557,8 @@ mlx5dr_cmd_arg_create(struct ibv_context *ctx, devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); if (!devx_obj->obj) { - DR_LOG(ERR, "Failed to create ARG"); + DR_LOG(ERR, "Failed to create ARG (syndrome: %#x)", + mlx5dr_cmd_get_syndrome(out)); simple_free(devx_obj); rte_errno = errno; return NULL; @@ -580,7 +596,6 @@ mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx, rte_errno = ENOMEM; return NULL; } - attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr); MLX5_SET(general_obj_in_cmd_hdr, attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); @@ -599,14 +614,16 @@ mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx, int type; 
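/*
 * Editorial sketch of the rationale (an assumption, not part of the patch): the
 * cached pattern keeps only the control part of each action, since per-rule
 * immediate values are zeroed here and supplied later through the ARG object.
 * COPY and the newly allowed ADD_FIELD actions carry no immediate value (all of
 * their bytes are control), so the zeroing below skips them.
 */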
type = MLX5_GET(set_action_in, &pattern_data[i], action_type); - if (type != MLX5_MODIFICATION_TYPE_COPY) + if (type != MLX5_MODIFICATION_TYPE_COPY && + type != MLX5_MODIFICATION_TYPE_ADD_FIELD) /* Action typ-copy use all bytes for control */ MLX5_SET(set_action_in, &pattern_data[i], data, 0); } devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); if (!devx_obj->obj) { - DR_LOG(ERR, "Failed to create header_modify_pattern"); + DR_LOG(ERR, "Failed to create header_modify_pattern (syndrome: %#x)", + mlx5dr_cmd_get_syndrome(out)); rte_errno = errno; goto free_obj; } @@ -649,7 +666,8 @@ mlx5dr_cmd_ste_create(struct ibv_context *ctx, devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); if (!devx_obj->obj) { - DR_LOG(ERR, "Failed to create STE"); + DR_LOG(ERR, "Failed to create STE (syndrome: %#x)", + mlx5dr_cmd_get_syndrome(out)); simple_free(devx_obj); rte_errno = errno; return NULL; @@ -708,7 +726,8 @@ mlx5dr_cmd_definer_create(struct ibv_context *ctx, devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); if (!devx_obj->obj) { - DR_LOG(ERR, "Failed to create Definer"); + DR_LOG(ERR, "Failed to create Definer (syndrome: %#x)", + mlx5dr_cmd_get_syndrome(out)); simple_free(devx_obj); rte_errno = errno; return NULL; @@ -775,7 +794,8 @@ int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj) ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out)); if (ret) { - DR_LOG(ERR, "Failed to modify SQ"); + DR_LOG(ERR, "Failed to modify SQ (syndrome: %#x)", + mlx5dr_cmd_get_syndrome(out)); rte_errno = errno; } @@ -843,7 +863,8 @@ mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx, devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); if (!devx_obj->obj) { - DR_LOG(ERR, "Failed to create ALIAS OBJ"); + DR_LOG(ERR, "Failed to create ALIAS OBJ (syndrome: %#x)", + mlx5dr_cmd_get_syndrome(out)); simple_free(devx_obj); rte_errno = errno; return NULL; @@ -1030,6 +1051,12 @@ int mlx5dr_cmd_query_caps(struct ibv_context *ctx, capability.flow_table_nic_cap. flow_table_properties_nic_receive.reparse); + caps->nic_ft.ignore_flow_level_rtc_valid = + MLX5_GET(query_hca_cap_out, + out, + capability.flow_table_nic_cap. 
+ flow_table_properties_nic_receive.ignore_flow_level_rtc_valid); + /* check cross-VHCA support in flow table properties */ res = MLX5_GET(query_hca_cap_out, out, diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h index e57013c3095..8a495db9b3c 100644 --- a/drivers/net/mlx5/hws/mlx5dr_cmd.h +++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h @@ -158,6 +158,7 @@ struct mlx5dr_cmd_allow_other_vhca_access_attr { struct mlx5dr_cmd_query_ft_caps { uint8_t max_level; uint8_t reparse; + uint8_t ignore_flow_level_rtc_valid; }; struct mlx5dr_cmd_query_vport_caps { diff --git a/drivers/net/mlx5/hws/mlx5dr_debug.c b/drivers/net/mlx5/hws/mlx5dr_debug.c index 48810142a0c..89529944a3b 100644 --- a/drivers/net/mlx5/hws/mlx5dr_debug.c +++ b/drivers/net/mlx5/hws/mlx5dr_debug.c @@ -331,11 +331,12 @@ static int mlx5dr_debug_dump_table(FILE *f, struct mlx5dr_table *tbl) } } - ret = fprintf(f, ",0x%" PRIx64 ",0x%" PRIx64 ",0x%" PRIx64 ",0x%" PRIx64 "\n", + ret = fprintf(f, ",0x%" PRIx64 ",0x%" PRIx64 ",0x%" PRIx64 ",0x%" PRIx64 ",0x%" PRIx64 "\n", mlx5dr_debug_icm_to_idx(icm_addr_0), mlx5dr_debug_icm_to_idx(icm_addr_1), mlx5dr_debug_icm_to_idx(local_icm_addr_0), - mlx5dr_debug_icm_to_idx(local_icm_addr_1)); + mlx5dr_debug_icm_to_idx(local_icm_addr_1), + (uint64_t)(uintptr_t)tbl->default_miss.miss_tbl); if (ret < 0) goto out_err; diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c index 33d0f2d18ec..95b5d4b70e8 100644 --- a/drivers/net/mlx5/hws/mlx5dr_definer.c +++ b/drivers/net/mlx5/hws/mlx5dr_definer.c @@ -177,7 +177,8 @@ struct mlx5dr_definer_conv_data { X(SET_BE32, ipsec_spi, v->hdr.spi, rte_flow_item_esp) \ X(SET_BE32, ipsec_sequence_number, v->hdr.seq, rte_flow_item_esp) \ X(SET, ib_l4_udp_port, UDP_ROCEV2_PORT, rte_flow_item_ib_bth) \ - X(SET, ib_l4_opcode, v->hdr.opcode, rte_flow_item_ib_bth) + X(SET, ib_l4_opcode, v->hdr.opcode, rte_flow_item_ib_bth) \ + X(SET, ib_l4_bth_a, v->hdr.a, rte_flow_item_ib_bth) \ /* Item set function format */ #define X(set_type, func_name, value, item_type) \ @@ -1320,35 +1321,24 @@ mlx5dr_definer_conv_item_mpls(struct mlx5dr_definer_conv_data *cd, return rte_errno; } - if (cd->relaxed) { - DR_LOG(ERR, "Relaxed mode is not supported"); - rte_errno = ENOTSUP; - return rte_errno; - } - - /* Currently support only MPLSoUDP */ - if (cd->last_item != RTE_FLOW_ITEM_TYPE_UDP && - cd->last_item != RTE_FLOW_ITEM_TYPE_MPLS) { - DR_LOG(ERR, "MPLS supported only after UDP"); - rte_errno = ENOTSUP; - return rte_errno; - } - - /* In order to match on MPLS we must match on ip_protocol and l4_dport. */ - fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)]; - if (!fc->tag_set) { - fc->item_idx = item_idx; - fc->tag_mask_set = &mlx5dr_definer_ones_set; - fc->tag_set = &mlx5dr_definer_udp_protocol_set; - DR_CALC_SET(fc, eth_l2, l4_type_bwc, false); - } + if (!cd->relaxed) { + /* In order to match on MPLS we must match on ip_protocol and l4_dport. 
*/ + fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)]; + if (!fc->tag_set) { + fc->item_idx = item_idx; + fc->tag_mask_set = &mlx5dr_definer_ones_set; + fc->tag_set = &mlx5dr_definer_udp_protocol_set; + DR_CALC_SET(fc, eth_l2, l4_type_bwc, false); + } - fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)]; - if (!fc->tag_set) { - fc->item_idx = item_idx; - fc->tag_mask_set = &mlx5dr_definer_ones_set; - fc->tag_set = &mlx5dr_definer_mpls_udp_port_set; - DR_CALC_SET(fc, eth_l4, destination_port, false); + /* Currently support only MPLSoUDP */ + fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)]; + if (!fc->tag_set) { + fc->item_idx = item_idx; + fc->tag_mask_set = &mlx5dr_definer_ones_set; + fc->tag_set = &mlx5dr_definer_mpls_udp_port_set; + DR_CALC_SET(fc, eth_l4, destination_port, false); + } } if (m && (!is_mem_zero(m->label_tc_s, 3) || m->ttl)) { @@ -1411,6 +1401,22 @@ mlx5dr_definer_get_register_fc(struct mlx5dr_definer_conv_data *cd, int reg) fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_7]; DR_CALC_SET_HDR(fc, registers, register_c_7); break; + case REG_C_8: + fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_8]; + DR_CALC_SET_HDR(fc, registers, register_c_8); + break; + case REG_C_9: + fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_9]; + DR_CALC_SET_HDR(fc, registers, register_c_9); + break; + case REG_C_10: + fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_10]; + DR_CALC_SET_HDR(fc, registers, register_c_10); + break; + case REG_C_11: + fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_11]; + DR_CALC_SET_HDR(fc, registers, register_c_11); + break; case REG_A: fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_A]; DR_CALC_SET_HDR(fc, metadata, general_purpose); @@ -2148,7 +2154,7 @@ mlx5dr_definer_conv_item_ib_l4(struct mlx5dr_definer_conv_data *cd, if (m->hdr.se || m->hdr.m || m->hdr.padcnt || m->hdr.tver || m->hdr.pkey || m->hdr.f || m->hdr.b || m->hdr.rsvd0 || - m->hdr.a || m->hdr.rsvd1 || !is_mem_zero(m->hdr.psn, 3)) { + m->hdr.rsvd1 || !is_mem_zero(m->hdr.psn, 3)) { rte_errno = ENOTSUP; return rte_errno; } @@ -2167,6 +2173,13 @@ mlx5dr_definer_conv_item_ib_l4(struct mlx5dr_definer_conv_data *cd, DR_CALC_SET_HDR(fc, ib_l4, qp); } + if (m->hdr.a) { + fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_A]; + fc->item_idx = item_idx; + fc->tag_set = &mlx5dr_definer_ib_l4_bth_a_set; + DR_CALC_SET_HDR(fc, ib_l4, ackreq); + } + return 0; } @@ -2351,11 +2364,15 @@ mlx5dr_definer_find_byte_in_tag(struct mlx5dr_definer *definer, uint32_t *tag_byte_off) { uint8_t byte_offset; - int i; + int i, dw_to_scan; + + /* Avoid accessing unused DW selectors */ + dw_to_scan = mlx5dr_definer_is_jumbo(definer) ? 
+ DW_SELECTORS : DW_SELECTORS_MATCH; /* Add offset since each DW covers multiple BYTEs */ byte_offset = hl_byte_off % DW_SIZE; - for (i = 0; i < DW_SELECTORS; i++) { + for (i = 0; i < dw_to_scan; i++) { if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) { *tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1); return 0; diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h index 6b645f4cf0c..f5a541bc17c 100644 --- a/drivers/net/mlx5/hws/mlx5dr_definer.h +++ b/drivers/net/mlx5/hws/mlx5dr_definer.h @@ -100,6 +100,10 @@ enum mlx5dr_definer_fname { MLX5DR_DEFINER_FNAME_REG_5, MLX5DR_DEFINER_FNAME_REG_6, MLX5DR_DEFINER_FNAME_REG_7, + MLX5DR_DEFINER_FNAME_REG_8, + MLX5DR_DEFINER_FNAME_REG_9, + MLX5DR_DEFINER_FNAME_REG_10, + MLX5DR_DEFINER_FNAME_REG_11, MLX5DR_DEFINER_FNAME_REG_A, MLX5DR_DEFINER_FNAME_REG_B, MLX5DR_DEFINER_FNAME_GRE_KEY_PRESENT, @@ -136,6 +140,7 @@ enum mlx5dr_definer_fname { MLX5DR_DEFINER_FNAME_OKS2_MPLS4_I, MLX5DR_DEFINER_FNAME_IB_L4_OPCODE, MLX5DR_DEFINER_FNAME_IB_L4_QPN, + MLX5DR_DEFINER_FNAME_IB_L4_A, MLX5DR_DEFINER_FNAME_MAX, }; diff --git a/drivers/net/mlx5/hws/mlx5dr_matcher.c b/drivers/net/mlx5/hws/mlx5dr_matcher.c index 1fe7ec1bc3f..a82c182460a 100644 --- a/drivers/net/mlx5/hws/mlx5dr_matcher.c +++ b/drivers/net/mlx5/hws/mlx5dr_matcher.c @@ -43,29 +43,21 @@ static void mlx5dr_matcher_destroy_end_ft(struct mlx5dr_matcher *matcher) mlx5dr_table_destroy_default_ft(matcher->tbl, matcher->end_ft); } -static int mlx5dr_matcher_free_rtc_pointing(struct mlx5dr_context *ctx, - uint32_t fw_ft_type, - enum mlx5dr_table_type type, - struct mlx5dr_devx_obj *devx_obj) +int mlx5dr_matcher_free_rtc_pointing(struct mlx5dr_context *ctx, + uint32_t fw_ft_type, + enum mlx5dr_table_type type, + struct mlx5dr_devx_obj *devx_obj) { - struct mlx5dr_cmd_ft_modify_attr ft_attr = {0}; int ret; if (type != MLX5DR_TABLE_TYPE_FDB && !mlx5dr_context_shared_gvmi_used(ctx)) return 0; - ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID; - ft_attr.type = fw_ft_type; - ft_attr.rtc_id_0 = 0; - ft_attr.rtc_id_1 = 0; - - ret = mlx5dr_cmd_flow_table_modify(devx_obj, &ft_attr); - if (ret) { + ret = mlx5dr_table_ft_set_next_rtc(devx_obj, fw_ft_type, NULL, NULL); + if (ret) DR_LOG(ERR, "Failed to disconnect previous RTC"); - return ret; - } - return 0; + return ret; } static int mlx5dr_matcher_shared_point_end_ft(struct mlx5dr_matcher *matcher) @@ -200,12 +192,10 @@ static int mlx5dr_matcher_shared_update_local_ft(struct mlx5dr_table *tbl) static int mlx5dr_matcher_connect(struct mlx5dr_matcher *matcher) { - struct mlx5dr_cmd_ft_modify_attr ft_attr = {0}; struct mlx5dr_table *tbl = matcher->tbl; struct mlx5dr_matcher *prev = NULL; struct mlx5dr_matcher *next = NULL; struct mlx5dr_matcher *tmp_matcher; - struct mlx5dr_devx_obj *ft; int ret; /* Find location in matcher list */ @@ -228,32 +218,30 @@ static int mlx5dr_matcher_connect(struct mlx5dr_matcher *matcher) LIST_INSERT_AFTER(prev, matcher, next); connect: - ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID; - ft_attr.type = tbl->fw_ft_type; - - /* Connect to next */ if (next) { - if (next->match_ste.rtc_0) - ft_attr.rtc_id_0 = next->match_ste.rtc_0->id; - if (next->match_ste.rtc_1) - ft_attr.rtc_id_1 = next->match_ste.rtc_1->id; - - ret = mlx5dr_cmd_flow_table_modify(matcher->end_ft, &ft_attr); + /* Connect to next RTC */ + ret = mlx5dr_table_ft_set_next_rtc(matcher->end_ft, + tbl->fw_ft_type, + next->match_ste.rtc_0, + next->match_ste.rtc_1); if (ret) { DR_LOG(ERR, "Failed to connect new matcher to 
next RTC"); goto remove_from_list; } + } else { + /* Connect last matcher to next miss_tbl if exists */ + ret = mlx5dr_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl); + if (ret) { + DR_LOG(ERR, "Failed connect new matcher to miss_tbl"); + goto remove_from_list; + } } - /* Connect to previous */ - ft = prev ? prev->end_ft : tbl->ft; - - if (matcher->match_ste.rtc_0) - ft_attr.rtc_id_0 = matcher->match_ste.rtc_0->id; - if (matcher->match_ste.rtc_1) - ft_attr.rtc_id_1 = matcher->match_ste.rtc_1->id; - - ret = mlx5dr_cmd_flow_table_modify(ft, &ft_attr); + /* Connect to previous FT */ + ret = mlx5dr_table_ft_set_next_rtc(prev ? prev->end_ft : tbl->ft, + tbl->fw_ft_type, + matcher->match_ste.rtc_0, + matcher->match_ste.rtc_1); if (ret) { DR_LOG(ERR, "Failed to connect new matcher to previous FT"); goto remove_from_list; @@ -265,6 +253,22 @@ static int mlx5dr_matcher_connect(struct mlx5dr_matcher *matcher) goto remove_from_list; } + if (prev) { + /* Reset next miss FT to default (drop refcount) */ + ret = mlx5dr_table_ft_set_default_next_ft(tbl, prev->end_ft); + if (ret) { + DR_LOG(ERR, "Failed to reset matcher ft default miss"); + goto remove_from_list; + } + } else { + /* Update tables missing to current table */ + ret = mlx5dr_table_update_connected_miss_tables(tbl); + if (ret) { + DR_LOG(ERR, "Fatal error, failed to update connected miss table"); + goto remove_from_list; + } + } + return 0; remove_from_list: @@ -272,81 +276,97 @@ static int mlx5dr_matcher_connect(struct mlx5dr_matcher *matcher) return ret; } -static int mlx5dr_matcher_disconnect(struct mlx5dr_matcher *matcher) +static int mlx5dr_last_matcher_disconnect(struct mlx5dr_table *tbl, + struct mlx5dr_devx_obj *prev_ft) { struct mlx5dr_cmd_ft_modify_attr ft_attr = {0}; + + if (tbl->default_miss.miss_tbl) { + /* Connect new last matcher to next miss_tbl if exists */ + return mlx5dr_table_connect_to_miss_table(tbl, + tbl->default_miss.miss_tbl); + } else { + ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID; + ft_attr.type = tbl->fw_ft_type; + /* Matcher is last, point prev end FT to default miss */ + mlx5dr_cmd_set_attr_connect_miss_tbl(tbl->ctx, + tbl->fw_ft_type, + tbl->type, + &ft_attr); + return mlx5dr_cmd_flow_table_modify(prev_ft, &ft_attr); + } +} + +static int mlx5dr_matcher_disconnect(struct mlx5dr_matcher *matcher) +{ + struct mlx5dr_matcher *tmp_matcher, *prev_matcher; struct mlx5dr_table *tbl = matcher->tbl; - struct mlx5dr_matcher *tmp_matcher; struct mlx5dr_devx_obj *prev_ft; struct mlx5dr_matcher *next; int ret; - prev_ft = matcher->tbl->ft; + prev_ft = tbl->ft; + prev_matcher = LIST_FIRST(&tbl->head); LIST_FOREACH(tmp_matcher, &tbl->head, next) { if (tmp_matcher == matcher) break; prev_ft = tmp_matcher->end_ft; + prev_matcher = tmp_matcher; } next = matcher->next.le_next; - ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID; - ft_attr.type = matcher->tbl->fw_ft_type; + LIST_REMOVE(matcher, next); if (next) { - /* Connect previous end FT to next RTC if exists */ - if (next->match_ste.rtc_0) - ft_attr.rtc_id_0 = next->match_ste.rtc_0->id; - if (next->match_ste.rtc_1) - ft_attr.rtc_id_1 = next->match_ste.rtc_1->id; + /* Connect previous end FT to next RTC */ + ret = mlx5dr_table_ft_set_next_rtc(prev_ft, + tbl->fw_ft_type, + next->match_ste.rtc_0, + next->match_ste.rtc_1); + if (ret) { + DR_LOG(ERR, "Failed to disconnect matcher"); + goto matcher_reconnect; + } } else { - /* Matcher is last, point prev end FT to default miss */ - mlx5dr_cmd_set_attr_connect_miss_tbl(tbl->ctx, - tbl->fw_ft_type, - 
tbl->type, - &ft_attr); - } - - ret = mlx5dr_cmd_flow_table_modify(prev_ft, &ft_attr); - if (ret) { - DR_LOG(ERR, "Failed to disconnect matcher"); - return ret; - } - - LIST_REMOVE(matcher, next); - - if (!next) { - /* ft no longer points to any RTC, drop refcount */ - ret = mlx5dr_matcher_free_rtc_pointing(tbl->ctx, - tbl->fw_ft_type, - tbl->type, - prev_ft); + ret = mlx5dr_last_matcher_disconnect(tbl, prev_ft); if (ret) { - DR_LOG(ERR, "Failed to reset last RTC refcount"); - return ret; + DR_LOG(ERR, "Failed to disconnect last matcher"); + goto matcher_reconnect; } } ret = mlx5dr_matcher_shared_update_local_ft(tbl); if (ret) { DR_LOG(ERR, "Failed to update local_ft in shared table"); - return ret; + goto matcher_reconnect; } - if (!next) { - /* ft no longer points to any RTC, drop refcount */ - ret = mlx5dr_matcher_free_rtc_pointing(tbl->ctx, - tbl->fw_ft_type, - tbl->type, - prev_ft); + /* Removing first matcher, update connected miss tables if exists */ + if (prev_ft == tbl->ft) { + ret = mlx5dr_table_update_connected_miss_tables(tbl); if (ret) { - DR_LOG(ERR, "Failed to reset last RTC refcount"); - return ret; + DR_LOG(ERR, "Fatal error, failed to update connected miss table"); + goto matcher_reconnect; } } + ret = mlx5dr_table_ft_set_default_next_ft(tbl, prev_ft); + if (ret) { + DR_LOG(ERR, "Fatal error, failed to restore matcher ft default miss"); + goto matcher_reconnect; + } + return 0; + +matcher_reconnect: + if (LIST_EMPTY(&tbl->head)) + LIST_INSERT_HEAD(&matcher->tbl->head, matcher, next); + else + LIST_INSERT_AFTER(prev_matcher, matcher, next); + + return ret; } static bool mlx5dr_matcher_supp_fw_wqe(struct mlx5dr_matcher *matcher) @@ -680,6 +700,30 @@ static void mlx5dr_matcher_set_pool_attr(struct mlx5dr_pool_attr *attr, } } +static int mlx5dr_matcher_check_and_process_at(struct mlx5dr_matcher *matcher, + struct mlx5dr_action_template *at) +{ + bool valid; + int ret; + + /* Check if action combinabtion is valid */ + valid = mlx5dr_action_check_combo(at->action_type_arr, matcher->tbl->type); + if (!valid) { + DR_LOG(ERR, "Invalid combination in action template"); + rte_errno = EINVAL; + return rte_errno; + } + + /* Process action template to setters */ + ret = mlx5dr_action_template_process(at); + if (ret) { + DR_LOG(ERR, "Failed to process action template"); + return ret; + } + + return 0; +} + static int mlx5dr_matcher_bind_at(struct mlx5dr_matcher *matcher) { bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(matcher->mt); @@ -689,22 +733,16 @@ static int mlx5dr_matcher_bind_at(struct mlx5dr_matcher *matcher) struct mlx5dr_context *ctx = tbl->ctx; uint32_t required_stes; int i, ret; - bool valid; + + if (matcher->flags & MLX5DR_MATCHER_FLAGS_COLLISION) + return 0; for (i = 0; i < matcher->num_of_at; i++) { struct mlx5dr_action_template *at = &matcher->at[i]; - /* Check if action combinabtion is valid */ - valid = mlx5dr_action_check_combo(at->action_type_arr, matcher->tbl->type); - if (!valid) { - DR_LOG(ERR, "Invalid combination in action template %d", i); - return rte_errno; - } - - /* Process action template to setters */ - ret = mlx5dr_action_template_process(at); + ret = mlx5dr_matcher_check_and_process_at(matcher, at); if (ret) { - DR_LOG(ERR, "Failed to process action template %d", i); + DR_LOG(ERR, "Invalid at %d", i); return rte_errno; } @@ -771,7 +809,7 @@ static void mlx5dr_matcher_unbind_at(struct mlx5dr_matcher *matcher) { struct mlx5dr_table *tbl = matcher->tbl; - if (!matcher->action_ste.max_stes) + if (!matcher->action_ste.max_stes || matcher->flags & 
MLX5DR_MATCHER_FLAGS_COLLISION) return; mlx5dr_action_free_single_stc(tbl->ctx, tbl->type, &matcher->action_ste.stc); @@ -924,6 +962,10 @@ mlx5dr_matcher_process_attr(struct mlx5dr_cmd_query_caps *caps, DR_LOG(ERR, "Root matcher can't specify FDB direction"); goto not_supported; } + if (attr->max_num_of_at_attach) { + DR_LOG(ERR, "Root matcher does not support at attaching"); + goto not_supported; + } return 0; } @@ -1039,6 +1081,8 @@ mlx5dr_matcher_create_col_matcher(struct mlx5dr_matcher *matcher) if (col_matcher->attr.table.sz_row_log > MLX5DR_MATCHER_ASSURED_ROW_RATIO) col_matcher->attr.table.sz_row_log -= MLX5DR_MATCHER_ASSURED_ROW_RATIO; + col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach; + ret = mlx5dr_matcher_process_attr(ctx->caps, col_matcher, false); if (ret) goto free_col_matcher; @@ -1212,6 +1256,42 @@ static int mlx5dr_matcher_uninit_root(struct mlx5dr_matcher *matcher) return ret; } +int mlx5dr_matcher_attach_at(struct mlx5dr_matcher *matcher, + struct mlx5dr_action_template *at) +{ + bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(matcher->mt); + uint32_t required_stes; + int ret; + + if (!matcher->attr.max_num_of_at_attach) { + DR_LOG(ERR, "Num of current at (%d) exceed allowed value", + matcher->num_of_at); + rte_errno = ENOTSUP; + return -rte_errno; + } + + ret = mlx5dr_matcher_check_and_process_at(matcher, at); + if (ret) + return -rte_errno; + + required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term); + if (matcher->action_ste.max_stes < required_stes) { + DR_LOG(ERR, "Required STEs [%d] exceeds initial action template STE [%d]", + required_stes, matcher->action_ste.max_stes); + rte_errno = ENOMEM; + return -rte_errno; + } + + matcher->at[matcher->num_of_at] = *at; + matcher->num_of_at += 1; + matcher->attr.max_num_of_at_attach -= 1; + + if (matcher->col_matcher) + matcher->col_matcher->num_of_at = matcher->num_of_at; + + return 0; +} + static int mlx5dr_matcher_set_templates(struct mlx5dr_matcher *matcher, struct mlx5dr_match_template *mt[], @@ -1241,7 +1321,8 @@ mlx5dr_matcher_set_templates(struct mlx5dr_matcher *matcher, return rte_errno; } - matcher->at = simple_calloc(num_of_at, sizeof(*matcher->at)); + matcher->at = simple_calloc(num_of_at + matcher->attr.max_num_of_at_attach, + sizeof(*matcher->at)); if (!matcher->at) { DR_LOG(ERR, "Failed to allocate action template array"); rte_errno = ENOMEM; diff --git a/drivers/net/mlx5/hws/mlx5dr_matcher.h b/drivers/net/mlx5/hws/mlx5dr_matcher.h index 4759068ab45..363a61fd416 100644 --- a/drivers/net/mlx5/hws/mlx5dr_matcher.h +++ b/drivers/net/mlx5/hws/mlx5dr_matcher.h @@ -115,4 +115,9 @@ static inline bool mlx5dr_matcher_is_insert_by_idx(struct mlx5dr_matcher *matche return matcher->attr.insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX; } +int mlx5dr_matcher_free_rtc_pointing(struct mlx5dr_context *ctx, + uint32_t fw_ft_type, + enum mlx5dr_table_type type, + struct mlx5dr_devx_obj *devx_obj); + #endif /* MLX5DR_MATCHER_H_ */ diff --git a/drivers/net/mlx5/hws/mlx5dr_pat_arg.c b/drivers/net/mlx5/hws/mlx5dr_pat_arg.c index 309a61d477c..349d77f2960 100644 --- a/drivers/net/mlx5/hws/mlx5dr_pat_arg.c +++ b/drivers/net/mlx5/hws/mlx5dr_pat_arg.c @@ -60,27 +60,22 @@ void mlx5dr_pat_uninit_pattern_cache(struct mlx5dr_pattern_cache *cache) simple_free(cache); } -static bool mlx5dr_pat_compare_pattern(enum mlx5dr_action_type cur_type, - int cur_num_of_actions, +static bool mlx5dr_pat_compare_pattern(int cur_num_of_actions, __be64 cur_actions[], - enum mlx5dr_action_type type, int num_of_actions, 
__be64 actions[]) { int i; - if (cur_num_of_actions != num_of_actions || cur_type != type) + if (cur_num_of_actions != num_of_actions) return false; - /* All decap-l3 look the same, only change is the num of actions */ - if (type == MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2) - return true; - for (i = 0; i < num_of_actions; i++) { u8 action_id = MLX5_GET(set_action_in, &actions[i], action_type); - if (action_id == MLX5_MODIFICATION_TYPE_COPY) { + if (action_id == MLX5_MODIFICATION_TYPE_COPY || + action_id == MLX5_MODIFICATION_TYPE_ADD_FIELD) { if (actions[i] != cur_actions[i]) return false; } else { @@ -96,17 +91,14 @@ static bool mlx5dr_pat_compare_pattern(enum mlx5dr_action_type cur_type, static struct mlx5dr_pattern_cache_item * mlx5dr_pat_find_cached_pattern(struct mlx5dr_pattern_cache *cache, - struct mlx5dr_action *action, uint16_t num_of_actions, __be64 *actions) { struct mlx5dr_pattern_cache_item *cached_pat; LIST_FOREACH(cached_pat, &cache->head, next) { - if (mlx5dr_pat_compare_pattern(cached_pat->type, - cached_pat->mh_data.num_of_actions, + if (mlx5dr_pat_compare_pattern(cached_pat->mh_data.num_of_actions, (__be64 *)cached_pat->mh_data.data, - action->type, num_of_actions, actions)) return cached_pat; @@ -117,13 +109,12 @@ mlx5dr_pat_find_cached_pattern(struct mlx5dr_pattern_cache *cache, static struct mlx5dr_pattern_cache_item * mlx5dr_pat_get_existing_cached_pattern(struct mlx5dr_pattern_cache *cache, - struct mlx5dr_action *action, uint16_t num_of_actions, __be64 *actions) { struct mlx5dr_pattern_cache_item *cached_pattern; - cached_pattern = mlx5dr_pat_find_cached_pattern(cache, action, num_of_actions, actions); + cached_pattern = mlx5dr_pat_find_cached_pattern(cache, num_of_actions, actions); if (cached_pattern) { /* LRU: move it to be first in the list */ LIST_REMOVE(cached_pattern, next); @@ -134,24 +125,9 @@ mlx5dr_pat_get_existing_cached_pattern(struct mlx5dr_pattern_cache *cache, return cached_pattern; } -static struct mlx5dr_pattern_cache_item * -mlx5dr_pat_get_cached_pattern_by_action(struct mlx5dr_pattern_cache *cache, - struct mlx5dr_action *action) -{ - struct mlx5dr_pattern_cache_item *cached_pattern; - - LIST_FOREACH(cached_pattern, &cache->head, next) { - if (cached_pattern->mh_data.pattern_obj->id == action->modify_header.pattern_obj->id) - return cached_pattern; - } - - return NULL; -} - static struct mlx5dr_pattern_cache_item * mlx5dr_pat_add_pattern_to_cache(struct mlx5dr_pattern_cache *cache, struct mlx5dr_devx_obj *pattern_obj, - enum mlx5dr_action_type type, uint16_t num_of_actions, __be64 *actions) { @@ -164,7 +140,6 @@ mlx5dr_pat_add_pattern_to_cache(struct mlx5dr_pattern_cache *cache, return NULL; } - cached_pattern->type = type; cached_pattern->mh_data.num_of_actions = num_of_actions; cached_pattern->mh_data.pattern_obj = pattern_obj; cached_pattern->mh_data.data = @@ -188,6 +163,20 @@ mlx5dr_pat_add_pattern_to_cache(struct mlx5dr_pattern_cache *cache, return NULL; } +static struct mlx5dr_pattern_cache_item * +mlx5dr_pat_find_cached_pattern_by_obj(struct mlx5dr_pattern_cache *cache, + struct mlx5dr_devx_obj *pat_obj) +{ + struct mlx5dr_pattern_cache_item *cached_pattern; + + LIST_FOREACH(cached_pattern, &cache->head, next) { + if (cached_pattern->mh_data.pattern_obj->id == pat_obj->id) + return cached_pattern; + } + + return NULL; +} + static void mlx5dr_pat_remove_pattern(struct mlx5dr_pattern_cache_item *cached_pattern) { @@ -196,14 +185,14 @@ mlx5dr_pat_remove_pattern(struct mlx5dr_pattern_cache_item *cached_pattern) simple_free(cached_pattern); } 
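The refactor in this hunk turns the pattern cache into a plain content-keyed, reference-counted store: mlx5dr_pat_get_pattern() now returns a shared FW pattern object (creating it only on a cache miss) and mlx5dr_pat_put_pattern() releases it, destroying the object once the last modify-header action lets go. A minimal standalone sketch of that get/put idiom follows; the types, the names and the absence of locking are simplifications for illustration, not the mlx5dr code itself.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct pat_entry {
	struct pat_entry *next;
	uint64_t *actions;      /* cached modify-header action words */
	int num_of_actions;
	int refcount;
};

struct pat_cache {
	struct pat_entry *head; /* most recently used entry first */
};

/* Find an entry with identical action data; bump its refcount and move it
 * to the front, or allocate a new entry on a cache miss.
 */
static struct pat_entry *
pat_get(struct pat_cache *cache, const uint64_t *actions, int num)
{
	struct pat_entry **pp, *e;

	for (pp = &cache->head; (e = *pp) != NULL; pp = &e->next) {
		if (e->num_of_actions == num &&
		    !memcmp(e->actions, actions, num * sizeof(*actions))) {
			*pp = e->next;          /* unlink */
			e->next = cache->head;  /* move to front (MRU) */
			cache->head = e;
			e->refcount++;
			return e;
		}
	}

	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	e->actions = malloc(num * sizeof(*actions));
	if (!e->actions) {
		free(e);
		return NULL;
	}
	memcpy(e->actions, actions, num * sizeof(*actions));
	e->num_of_actions = num;
	e->refcount = 1;
	e->next = cache->head;
	cache->head = e;
	return e;
}

/* Drop one reference; free the entry only when the last user releases it. */
static void
pat_put(struct pat_cache *cache, struct pat_entry *e)
{
	struct pat_entry **pp;

	if (--e->refcount)
		return;
	for (pp = &cache->head; *pp; pp = &(*pp)->next) {
		if (*pp == e) {
			*pp = e->next;
			break;
		}
	}
	free(e->actions);
	free(e);
}

int main(void)
{
	struct pat_cache cache = { 0 };
	uint64_t acts[2] = { 0x1111, 0x2222 };
	struct pat_entry *a = pat_get(&cache, acts, 2);
	struct pat_entry *b = pat_get(&cache, acts, 2); /* reuses the same entry */

	printf("shared entry: %s, refcount=%d\n", a == b ? "yes" : "no", a->refcount);
	pat_put(&cache, b);
	pat_put(&cache, a); /* last put frees the entry */
	return 0;
}

In the driver the same flow runs under the pattern cache spinlock, and the final put additionally calls mlx5dr_cmd_destroy_obj() on the cached devx pattern object, as the hunk below shows.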
-static void -mlx5dr_pat_put_pattern(struct mlx5dr_pattern_cache *cache, - struct mlx5dr_action *action) +void mlx5dr_pat_put_pattern(struct mlx5dr_context *ctx, + struct mlx5dr_devx_obj *pat_obj) { + struct mlx5dr_pattern_cache *cache = ctx->pattern_cache; struct mlx5dr_pattern_cache_item *cached_pattern; pthread_spin_lock(&cache->lock); - cached_pattern = mlx5dr_pat_get_cached_pattern_by_action(cache, action); + cached_pattern = mlx5dr_pat_find_cached_pattern_by_obj(cache, pat_obj); if (!cached_pattern) { DR_LOG(ERR, "Failed to find pattern according to action with pt"); assert(false); @@ -214,62 +203,56 @@ mlx5dr_pat_put_pattern(struct mlx5dr_pattern_cache *cache, goto out; mlx5dr_pat_remove_pattern(cached_pattern); + mlx5dr_cmd_destroy_obj(pat_obj); out: pthread_spin_unlock(&cache->lock); } -static int mlx5dr_pat_get_pattern(struct mlx5dr_context *ctx, - struct mlx5dr_action *action, - uint16_t num_of_actions, - size_t pattern_sz, - __be64 *pattern) +struct mlx5dr_devx_obj * +mlx5dr_pat_get_pattern(struct mlx5dr_context *ctx, + __be64 *pattern, size_t pattern_sz) { + uint16_t num_of_actions = pattern_sz / MLX5DR_MODIFY_ACTION_SIZE; struct mlx5dr_pattern_cache_item *cached_pattern; - int ret = 0; + struct mlx5dr_devx_obj *pat_obj = NULL; pthread_spin_lock(&ctx->pattern_cache->lock); cached_pattern = mlx5dr_pat_get_existing_cached_pattern(ctx->pattern_cache, - action, num_of_actions, pattern); if (cached_pattern) { - action->modify_header.pattern_obj = cached_pattern->mh_data.pattern_obj; + pat_obj = cached_pattern->mh_data.pattern_obj; goto out_unlock; } - action->modify_header.pattern_obj = - mlx5dr_cmd_header_modify_pattern_create(ctx->ibv_ctx, - pattern_sz, - (uint8_t *)pattern); - if (!action->modify_header.pattern_obj) { + pat_obj = mlx5dr_cmd_header_modify_pattern_create(ctx->ibv_ctx, + pattern_sz, + (uint8_t *)pattern); + if (!pat_obj) { DR_LOG(ERR, "Failed to create pattern FW object"); - - ret = rte_errno; goto out_unlock; } - cached_pattern = - mlx5dr_pat_add_pattern_to_cache(ctx->pattern_cache, - action->modify_header.pattern_obj, - action->type, - num_of_actions, - pattern); + cached_pattern = mlx5dr_pat_add_pattern_to_cache(ctx->pattern_cache, + pat_obj, + num_of_actions, + pattern); if (!cached_pattern) { DR_LOG(ERR, "Failed to add pattern to cache"); - ret = rte_errno; goto clean_pattern; } -out_unlock: pthread_spin_unlock(&ctx->pattern_cache->lock); - return ret; + return pat_obj; clean_pattern: - mlx5dr_cmd_destroy_obj(action->modify_header.pattern_obj); + mlx5dr_cmd_destroy_obj(pat_obj); + pat_obj = NULL; +out_unlock: pthread_spin_unlock(&ctx->pattern_cache->lock); - return ret; + return pat_obj; } static void @@ -388,64 +371,80 @@ bool mlx5dr_arg_is_valid_arg_request_size(struct mlx5dr_context *ctx, return true; } -static int -mlx5dr_arg_create_modify_header_arg(struct mlx5dr_context *ctx, - struct mlx5dr_action *action, - uint16_t num_of_actions, - __be64 *pattern, - uint32_t bulk_size) +struct mlx5dr_devx_obj * +mlx5dr_arg_create(struct mlx5dr_context *ctx, + uint8_t *data, + size_t data_sz, + uint32_t log_bulk_sz, + bool write_data) { - uint32_t flags = action->flags; - uint16_t args_log_size; - int ret = 0; + struct mlx5dr_devx_obj *arg_obj; + uint16_t single_arg_log_sz; + uint16_t multi_arg_log_sz; + int ret; - /* Alloc bulk of args */ - args_log_size = mlx5dr_arg_get_arg_log_size(num_of_actions); - if (args_log_size >= MLX5DR_ARG_CHUNK_SIZE_MAX) { - DR_LOG(ERR, "Exceed number of allowed actions %u", - num_of_actions); - rte_errno = EINVAL; - return rte_errno; + 
single_arg_log_sz = mlx5dr_arg_data_size_to_arg_log_size(data_sz); + multi_arg_log_sz = single_arg_log_sz + log_bulk_sz; + + if (single_arg_log_sz >= MLX5DR_ARG_CHUNK_SIZE_MAX) { + DR_LOG(ERR, "Requested single arg %u not supported", single_arg_log_sz); + rte_errno = ENOTSUP; + return NULL; } - if (!mlx5dr_arg_is_valid_arg_request_size(ctx, args_log_size + bulk_size)) { - DR_LOG(ERR, "Arg size %d does not fit FW capability", - args_log_size + bulk_size); - rte_errno = EINVAL; - return rte_errno; + if (!mlx5dr_arg_is_valid_arg_request_size(ctx, multi_arg_log_sz)) { + DR_LOG(ERR, "Argument log size %d not supported by FW", multi_arg_log_sz); + rte_errno = ENOTSUP; + return NULL; } - action->modify_header.arg_obj = - mlx5dr_cmd_arg_create(ctx->ibv_ctx, args_log_size + bulk_size, - ctx->pd_num); - if (!action->modify_header.arg_obj) { - DR_LOG(ERR, "Failed allocating arg in order: %d", - args_log_size + bulk_size); - return rte_errno; + /* Alloc bulk of args */ + arg_obj = mlx5dr_cmd_arg_create(ctx->ibv_ctx, multi_arg_log_sz, ctx->pd_num); + if (!arg_obj) { + DR_LOG(ERR, "Failed allocating arg in order: %d", multi_arg_log_sz); + return NULL; } - /* When INLINE need to write the arg data */ - if (flags & MLX5DR_ACTION_FLAG_SHARED) + if (write_data) { ret = mlx5dr_arg_write_inline_arg_data(ctx, - action->modify_header.arg_obj->id, - (uint8_t *)pattern, - num_of_actions * - MLX5DR_MODIFY_ACTION_SIZE); - if (ret) { - DR_LOG(ERR, "Failed writing INLINE arg in order: %d", - args_log_size + bulk_size); - mlx5dr_cmd_destroy_obj(action->modify_header.arg_obj); - return rte_errno; + arg_obj->id, + data, data_sz); + if (ret) { + DR_LOG(ERR, "Failed writing arg data"); + mlx5dr_cmd_destroy_obj(arg_obj); + return NULL; + } } - return 0; + return arg_obj; } -bool mlx5dr_pat_arg_verify_actions(__be64 pattern[], uint16_t num_of_actions) +struct mlx5dr_devx_obj * +mlx5dr_arg_create_modify_header_arg(struct mlx5dr_context *ctx, + __be64 *data, + uint8_t num_of_actions, + uint32_t log_bulk_sz, + bool write_data) { - int i; + size_t data_sz = num_of_actions * MLX5DR_MODIFY_ACTION_SIZE; + struct mlx5dr_devx_obj *arg_obj; + + arg_obj = mlx5dr_arg_create(ctx, + (uint8_t *)data, + data_sz, + log_bulk_sz, + write_data); + if (!arg_obj) + DR_LOG(ERR, "Failed creating modify header arg"); + + return arg_obj; +} - for (i = 0; i < num_of_actions; i++) { +bool mlx5dr_pat_verify_actions(__be64 pattern[], size_t sz) +{ + size_t i; + + for (i = 0; i < sz / MLX5DR_MODIFY_ACTION_SIZE; i++) { u8 action_id = MLX5_GET(set_action_in, &pattern[i], action_type); if (action_id >= MLX5_MODIFICATION_TYPE_MAX) { @@ -456,51 +455,3 @@ bool mlx5dr_pat_arg_verify_actions(__be64 pattern[], uint16_t num_of_actions) return true; } - -int mlx5dr_pat_arg_create_modify_header(struct mlx5dr_context *ctx, - struct mlx5dr_action *action, - size_t pattern_sz, - __be64 pattern[], - uint32_t bulk_size) -{ - uint16_t num_of_actions; - int ret; - - num_of_actions = pattern_sz / MLX5DR_MODIFY_ACTION_SIZE; - if (num_of_actions == 0) { - DR_LOG(ERR, "Invalid number of actions %u", num_of_actions); - rte_errno = EINVAL; - return rte_errno; - } - - action->modify_header.num_of_actions = num_of_actions; - - ret = mlx5dr_arg_create_modify_header_arg(ctx, action, - num_of_actions, - pattern, - bulk_size); - if (ret) { - DR_LOG(ERR, "Failed to allocate arg"); - return ret; - } - - ret = mlx5dr_pat_get_pattern(ctx, action, num_of_actions, pattern_sz, - pattern); - if (ret) { - DR_LOG(ERR, "Failed to allocate pattern"); - goto free_arg; - } - - return 0; - 
-free_arg: - mlx5dr_cmd_destroy_obj(action->modify_header.arg_obj); - return rte_errno; -} - -void mlx5dr_pat_arg_destroy_modify_header(struct mlx5dr_context *ctx, - struct mlx5dr_action *action) -{ - mlx5dr_cmd_destroy_obj(action->modify_header.arg_obj); - mlx5dr_pat_put_pattern(ctx->pattern_cache, action); -} diff --git a/drivers/net/mlx5/hws/mlx5dr_pat_arg.h b/drivers/net/mlx5/hws/mlx5dr_pat_arg.h index ec467dbb4bb..2a38891c4dd 100644 --- a/drivers/net/mlx5/hws/mlx5dr_pat_arg.h +++ b/drivers/net/mlx5/hws/mlx5dr_pat_arg.h @@ -28,7 +28,6 @@ struct mlx5dr_pattern_cache { }; struct mlx5dr_pattern_cache_item { - enum mlx5dr_action_type type; struct { struct mlx5dr_devx_obj *pattern_obj; struct dr_icm_chunk *chunk; @@ -53,16 +52,29 @@ int mlx5dr_pat_init_pattern_cache(struct mlx5dr_pattern_cache **cache); void mlx5dr_pat_uninit_pattern_cache(struct mlx5dr_pattern_cache *cache); -bool mlx5dr_pat_arg_verify_actions(__be64 pattern[], uint16_t num_of_actions); - -int mlx5dr_pat_arg_create_modify_header(struct mlx5dr_context *ctx, - struct mlx5dr_action *action, - size_t pattern_sz, - __be64 pattern[], - uint32_t bulk_size); - -void mlx5dr_pat_arg_destroy_modify_header(struct mlx5dr_context *ctx, - struct mlx5dr_action *action); +bool mlx5dr_pat_verify_actions(__be64 pattern[], size_t sz); + +struct mlx5dr_devx_obj * +mlx5dr_arg_create(struct mlx5dr_context *ctx, + uint8_t *data, + size_t data_sz, + uint32_t log_bulk_sz, + bool write_data); + +struct mlx5dr_devx_obj * +mlx5dr_arg_create_modify_header_arg(struct mlx5dr_context *ctx, + __be64 *data, + uint8_t num_of_actions, + uint32_t log_bulk_sz, + bool write_data); + +struct mlx5dr_devx_obj * +mlx5dr_pat_get_pattern(struct mlx5dr_context *ctx, + __be64 *pattern, + size_t pattern_sz); + +void mlx5dr_pat_put_pattern(struct mlx5dr_context *ctx, + struct mlx5dr_devx_obj *pat_obj); bool mlx5dr_arg_is_valid_arg_request_size(struct mlx5dr_context *ctx, uint32_t arg_size); diff --git a/drivers/net/mlx5/hws/mlx5dr_pool.c b/drivers/net/mlx5/hws/mlx5dr_pool.c index af6a5c743bf..b7b532c7cfb 100644 --- a/drivers/net/mlx5/hws/mlx5dr_pool.c +++ b/drivers/net/mlx5/hws/mlx5dr_pool.c @@ -116,7 +116,7 @@ static int mlx5dr_pool_bitmap_get_free_slot(struct rte_bitmap *bitmap, uint32_t if (!rte_bitmap_scan(bitmap, iidx, &slab)) return ENOMEM; - *iidx += __builtin_ctzll(slab); + *iidx += rte_ctz64(slab); rte_bitmap_clear(bitmap, *iidx); diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c index f91f04d924d..e1150cd75d0 100644 --- a/drivers/net/mlx5/hws/mlx5dr_table.c +++ b/drivers/net/mlx5/hws/mlx5dr_table.c @@ -90,7 +90,7 @@ mlx5dr_table_connect_to_default_miss_tbl(struct mlx5dr_table *tbl, ret = mlx5dr_cmd_flow_table_modify(ft, &ft_attr); if (ret) { DR_LOG(ERR, "Failed to connect FT to default FDB FT"); - return errno; + return ret; } return 0; @@ -396,7 +396,7 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_context *ctx, return NULL; } - tbl = simple_malloc(sizeof(*tbl)); + tbl = simple_calloc(1, sizeof(*tbl)); if (!tbl) { rte_errno = ENOMEM; return NULL; @@ -405,7 +405,6 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_context *ctx, tbl->ctx = ctx; tbl->type = attr->type; tbl->level = attr->level; - LIST_INIT(&tbl->head); ret = mlx5dr_table_init(tbl); if (ret) { @@ -427,12 +426,223 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_context *ctx, int mlx5dr_table_destroy(struct mlx5dr_table *tbl) { struct mlx5dr_context *ctx = tbl->ctx; - pthread_spin_lock(&ctx->ctrl_lock); + if (!LIST_EMPTY(&tbl->head)) { 
+ DR_LOG(ERR, "Cannot destroy table containing matchers"); + rte_errno = EBUSY; + goto unlock_err; + } + + if (!LIST_EMPTY(&tbl->default_miss.head)) { + DR_LOG(ERR, "Cannot destroy table pointed by default miss"); + rte_errno = EBUSY; + goto unlock_err; + } + LIST_REMOVE(tbl, next); pthread_spin_unlock(&ctx->ctrl_lock); mlx5dr_table_uninit(tbl); simple_free(tbl); return 0; + +unlock_err: + pthread_spin_unlock(&ctx->ctrl_lock); + return -rte_errno; +} + +static struct mlx5dr_devx_obj * +mlx5dr_table_get_last_ft(struct mlx5dr_table *tbl) +{ + struct mlx5dr_devx_obj *last_ft = tbl->ft; + struct mlx5dr_matcher *matcher; + + LIST_FOREACH(matcher, &tbl->head, next) + last_ft = matcher->end_ft; + + return last_ft; +} + +int mlx5dr_table_ft_set_default_next_ft(struct mlx5dr_table *tbl, + struct mlx5dr_devx_obj *ft_obj) +{ + struct mlx5dr_cmd_ft_modify_attr ft_attr = {0}; + int ret; + + /* Due to FW limitation, resetting the flow table to default action will + * disconnect RTC when ignore_flow_level_rtc_valid is not supported. + */ + if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid) + return 0; + + if (tbl->type == MLX5DR_TABLE_TYPE_FDB) + return mlx5dr_table_connect_to_default_miss_tbl(tbl, ft_obj); + + ft_attr.type = tbl->fw_ft_type; + ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION; + ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT; + + ret = mlx5dr_cmd_flow_table_modify(ft_obj, &ft_attr); + if (ret) { + DR_LOG(ERR, "Failed to set FT default miss action"); + return ret; + } + + return 0; +} + +int mlx5dr_table_ft_set_next_rtc(struct mlx5dr_devx_obj *ft, + uint32_t fw_ft_type, + struct mlx5dr_devx_obj *rtc_0, + struct mlx5dr_devx_obj *rtc_1) +{ + struct mlx5dr_cmd_ft_modify_attr ft_attr = {0}; + + ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID; + ft_attr.type = fw_ft_type; + ft_attr.rtc_id_0 = rtc_0 ? rtc_0->id : 0; + ft_attr.rtc_id_1 = rtc_1 ? 
rtc_1->id : 0; + + return mlx5dr_cmd_flow_table_modify(ft, &ft_attr); +} + +static int mlx5dr_table_ft_set_next_ft(struct mlx5dr_devx_obj *ft, + uint32_t fw_ft_type, + uint32_t next_ft_id) +{ + struct mlx5dr_cmd_ft_modify_attr ft_attr = {0}; + + ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION; + ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL; + ft_attr.type = fw_ft_type; + ft_attr.table_miss_id = next_ft_id; + + return mlx5dr_cmd_flow_table_modify(ft, &ft_attr); +} + +int mlx5dr_table_update_connected_miss_tables(struct mlx5dr_table *dst_tbl) +{ + struct mlx5dr_table *src_tbl; + int ret; + + if (LIST_EMPTY(&dst_tbl->default_miss.head)) + return 0; + + LIST_FOREACH(src_tbl, &dst_tbl->default_miss.head, default_miss.next) { + ret = mlx5dr_table_connect_to_miss_table(src_tbl, dst_tbl); + if (ret) { + DR_LOG(ERR, "Failed to update source miss table, unexpected behavior"); + return ret; + } + } + + return 0; +} + +int mlx5dr_table_connect_to_miss_table(struct mlx5dr_table *src_tbl, + struct mlx5dr_table *dst_tbl) +{ + struct mlx5dr_devx_obj *last_ft; + struct mlx5dr_matcher *matcher; + int ret; + + last_ft = mlx5dr_table_get_last_ft(src_tbl); + + if (dst_tbl) { + if (LIST_EMPTY(&dst_tbl->head)) { + /* Connect src_tbl last_ft to dst_tbl start anchor */ + ret = mlx5dr_table_ft_set_next_ft(last_ft, + src_tbl->fw_ft_type, + dst_tbl->ft->id); + if (ret) + return ret; + + /* Reset last_ft RTC to default RTC */ + ret = mlx5dr_table_ft_set_next_rtc(last_ft, + src_tbl->fw_ft_type, + NULL, NULL); + if (ret) + return ret; + } else { + /* Connect src_tbl last_ft to first matcher RTC */ + matcher = LIST_FIRST(&dst_tbl->head); + ret = mlx5dr_table_ft_set_next_rtc(last_ft, + src_tbl->fw_ft_type, + matcher->match_ste.rtc_0, + matcher->match_ste.rtc_1); + if (ret) + return ret; + + /* Reset next miss FT to default */ + ret = mlx5dr_table_ft_set_default_next_ft(src_tbl, last_ft); + if (ret) + return ret; + } + } else { + /* Reset next miss FT to default */ + ret = mlx5dr_table_ft_set_default_next_ft(src_tbl, last_ft); + if (ret) + return ret; + + /* Reset last_ft RTC to default RTC */ + ret = mlx5dr_table_ft_set_next_rtc(last_ft, + src_tbl->fw_ft_type, + NULL, NULL); + if (ret) + return ret; + } + + src_tbl->default_miss.miss_tbl = dst_tbl; + + return 0; +} + +static int mlx5dr_table_set_default_miss_not_valid(struct mlx5dr_table *tbl, + struct mlx5dr_table *miss_tbl) +{ + if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid || + mlx5dr_context_shared_gvmi_used(tbl->ctx)) { + DR_LOG(ERR, "Default miss table is not supported"); + rte_errno = EOPNOTSUPP; + return -rte_errno; + } + + if (mlx5dr_table_is_root(tbl) || + (miss_tbl && mlx5dr_table_is_root(miss_tbl)) || + (miss_tbl && miss_tbl->type != tbl->type) || + (miss_tbl && tbl->default_miss.miss_tbl)) { + DR_LOG(ERR, "Invalid arguments"); + rte_errno = EINVAL; + return -rte_errno; + } + + return 0; +} + +int mlx5dr_table_set_default_miss(struct mlx5dr_table *tbl, + struct mlx5dr_table *miss_tbl) +{ + struct mlx5dr_context *ctx = tbl->ctx; + int ret; + + ret = mlx5dr_table_set_default_miss_not_valid(tbl, miss_tbl); + if (ret) + return ret; + + pthread_spin_lock(&ctx->ctrl_lock); + + ret = mlx5dr_table_connect_to_miss_table(tbl, miss_tbl); + if (ret) + goto out; + + if (miss_tbl) + LIST_INSERT_HEAD(&miss_tbl->default_miss.head, tbl, default_miss.next); + else + LIST_REMOVE(tbl, default_miss.next); + + pthread_spin_unlock(&ctx->ctrl_lock); + return 0; +out: + pthread_spin_unlock(&ctx->ctrl_lock); + return -ret; } diff 
--git a/drivers/net/mlx5/hws/mlx5dr_table.h b/drivers/net/mlx5/hws/mlx5dr_table.h index 362d8a90483..b2fbb474166 100644 --- a/drivers/net/mlx5/hws/mlx5dr_table.h +++ b/drivers/net/mlx5/hws/mlx5dr_table.h @@ -7,6 +7,14 @@ #define MLX5DR_ROOT_LEVEL 0 +struct mlx5dr_default_miss { + /* My miss table */ + struct mlx5dr_table *miss_tbl; + LIST_ENTRY(mlx5dr_table) next; + /* Tables missing to my table */ + LIST_HEAD(miss_table_head, mlx5dr_table) head; +}; + struct mlx5dr_table { struct mlx5dr_context *ctx; struct mlx5dr_devx_obj *ft; @@ -16,6 +24,7 @@ struct mlx5dr_table { uint32_t level; LIST_HEAD(matcher_head, mlx5dr_matcher) head; LIST_ENTRY(mlx5dr_table) next; + struct mlx5dr_default_miss default_miss; }; static inline @@ -43,4 +52,18 @@ struct mlx5dr_devx_obj *mlx5dr_table_create_default_ft(struct ibv_context *ibv, void mlx5dr_table_destroy_default_ft(struct mlx5dr_table *tbl, struct mlx5dr_devx_obj *ft_obj); + +int mlx5dr_table_connect_to_miss_table(struct mlx5dr_table *src_tbl, + struct mlx5dr_table *dst_tbl); + +int mlx5dr_table_update_connected_miss_tables(struct mlx5dr_table *dst_tbl); + +int mlx5dr_table_ft_set_default_next_ft(struct mlx5dr_table *tbl, + struct mlx5dr_devx_obj *ft_obj); + +int mlx5dr_table_ft_set_next_rtc(struct mlx5dr_devx_obj *ft, + uint32_t fw_ft_type, + struct mlx5dr_devx_obj *rtc_0, + struct mlx5dr_devx_obj *rtc_1); + #endif /* MLX5DR_TABLE_H_ */ diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c index 639e629fe48..dd5a0c546d7 100644 --- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c +++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c @@ -1083,6 +1083,7 @@ mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info) line_size = getline(&port_name, &port_name_size, file); if (line_size < 0) { + free(port_name); fclose(file); rte_errno = errno; return -rte_errno; diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index d8f1adfe3df..d5ef695e6d9 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -675,6 +675,9 @@ void mlx5_os_free_shared_dr(struct mlx5_priv *priv) { struct mlx5_dev_ctx_shared *sh = priv->sh; +#ifdef HAVE_MLX5DV_DR + int i; +#endif MLX5_ASSERT(sh && sh->refcnt); if (sh->refcnt > 1) @@ -703,18 +706,20 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv) mlx5_glue->destroy_flow_action(sh->pop_vlan_action); sh->pop_vlan_action = NULL; } - if (sh->send_to_kernel_action.action) { - void *action = sh->send_to_kernel_action.action; + for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) { + if (sh->send_to_kernel_action[i].action) { + void *action = sh->send_to_kernel_action[i].action; - mlx5_glue->destroy_flow_action(action); - sh->send_to_kernel_action.action = NULL; - } - if (sh->send_to_kernel_action.tbl) { - struct mlx5_flow_tbl_resource *tbl = - sh->send_to_kernel_action.tbl; + mlx5_glue->destroy_flow_action(action); + sh->send_to_kernel_action[i].action = NULL; + } + if (sh->send_to_kernel_action[i].tbl) { + struct mlx5_flow_tbl_resource *tbl = + sh->send_to_kernel_action[i].tbl; - flow_dv_tbl_resource_release(sh, tbl); - sh->send_to_kernel_action.tbl = NULL; + flow_dv_tbl_resource_release(sh, tbl); + sh->send_to_kernel_action[i].tbl = NULL; + } } #endif /* HAVE_MLX5DV_DR */ if (sh->default_miss_action) @@ -1311,7 +1316,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, * REG_C_0 and REG_C_1 is reserved for metadata feature. 
*/ reg_c_mask &= 0xfc; - if (__builtin_popcount(reg_c_mask) < 1) { + if (rte_popcount32(reg_c_mask) < 1) { priv->mtr_en = 0; DRV_LOG(WARNING, "No available register for" " meter."); @@ -1592,8 +1597,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, err = ENOTSUP; goto error; } - usable_bits = __builtin_popcount(priv->sh->dv_regc0_mask); - required_bits = __builtin_popcount(priv->vport_meta_mask); + usable_bits = rte_popcount32(priv->sh->dv_regc0_mask); + required_bits = rte_popcount32(priv->vport_meta_mask); if (usable_bits < required_bits) { DRV_LOG(ERR, "Not enough bits available in reg_c[0] to provide " "representor matching."); diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index b373306f981..997df595d0a 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -139,7 +139,7 @@ /* Enable extensive flow metadata support. */ #define MLX5_DV_XMETA_EN "dv_xmeta_en" -/* Device parameter to let the user manage the lacp traffic of bonded device */ +/* Device parameter to let the user manage the lacp traffic of bonding device */ #define MLX5_LACP_BY_USER "lacp_by_user" /* Activate Netlink support in VF mode. */ @@ -241,7 +241,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .type = "mlx5_port_id_ipool", }, [MLX5_IPOOL_JUMP] = { - .size = sizeof(struct mlx5_flow_tbl_data_entry), + /* + * MLX5_IPOOL_JUMP ipool entry size depends on selected flow engine. + * When HW steering is enabled mlx5_flow_group struct is used. + * Otherwise mlx5_flow_tbl_data_entry struct is used. + */ + .size = 0, .trunk_size = 64, .grow_trunk = 3, .grow_shift = 2, @@ -904,6 +909,14 @@ mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh) sizeof(struct mlx5_flow_handle) : MLX5_FLOW_HANDLE_VERBS_SIZE; break; +#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) + /* Set MLX5_IPOOL_JUMP ipool entry size depending on selected flow engine. */ + case MLX5_IPOOL_JUMP: + cfg.size = sh->config.dv_flow_en == 2 ? + sizeof(struct mlx5_flow_group) : + sizeof(struct mlx5_flow_tbl_data_entry); + break; +#endif } if (sh->config.reclaim_mode) { cfg.release_mem_en = 1; @@ -1720,7 +1733,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, do { if (sh->tis[i]) claim_zero(mlx5_devx_cmd_destroy(sh->tis[i])); - } while (++i < (uint32_t)sh->bond.n_port); + } while (++i <= (uint32_t)sh->bond.n_port); if (sh->td) claim_zero(mlx5_devx_cmd_destroy(sh->td)); mlx5_free(sh); @@ -1864,7 +1877,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) do { if (sh->tis[i]) claim_zero(mlx5_devx_cmd_destroy(sh->tis[i])); - } while (++i < sh->bond.n_port); + } while (++i <= sh->bond.n_port); if (sh->td) claim_zero(mlx5_devx_cmd_destroy(sh->td)); #ifdef HAVE_MLX5_HWS_SUPPORT diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 3785103308b..0b709a1bdaa 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -1368,7 +1368,7 @@ struct mlx5_hws_cnt_svc_mng { uint32_t refcnt; uint32_t service_core; uint32_t query_interval; - pthread_t service_thread; + rte_thread_t service_thread; uint8_t svc_running; struct mlx5_hws_aso_mng aso_mng __rte_cache_aligned; }; @@ -1432,7 +1432,9 @@ struct mlx5_dev_ctx_shared { /* Direct Rules tables for FDB, NIC TX+RX */ void *dr_drop_action; /* Pointer to DR drop action, any domain. */ void *pop_vlan_action; /* Pointer to DR pop VLAN action. 
*/ - struct mlx5_send_to_kernel_action send_to_kernel_action; +#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) + struct mlx5_send_to_kernel_action send_to_kernel_action[MLX5DR_TABLE_TYPE_MAX]; +#endif struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */ struct mlx5_hlist *modify_cmds; struct mlx5_hlist *tag_table; @@ -1715,7 +1717,6 @@ struct mlx5_quota { /* Bulk management structure for flow quota. */ struct mlx5_quota_ctx { - uint32_t nb_quotas; /* Total number of quota objects */ struct mlx5dr_action *dr_action; /* HWS action */ struct mlx5_devx_obj *devx_obj; /* DEVX ranged object. */ struct mlx5_pmd_mr mr; /* MR for READ from MTR ASO */ @@ -1856,7 +1857,7 @@ struct mlx5_priv { /* HW steering global tag action. */ struct mlx5dr_action *hw_tag[2]; /* HW steering global send to kernel action. */ - struct mlx5dr_action *hw_send_to_kernel; + struct mlx5dr_action *hw_send_to_kernel[MLX5DR_TABLE_TYPE_MAX]; /* HW steering create ongoing rte flow table list header. */ LIST_HEAD(flow_hw_tbl_ongo, rte_flow_template_table) flow_hw_tbl_ongo; struct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. */ diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index e91eb636d0f..8ad85e60271 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -3198,6 +3198,11 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, uint8_t vni[4]; } id = { .vlan_id = 0, }; + struct rte_flow_item_vxlan_gpe nic_mask = { + .vni = "\xff\xff\xff", + .protocol = 0xff, + }; + if (!priv->sh->config.l3_vxlan_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -3221,18 +3226,12 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, mask = &rte_flow_item_vxlan_gpe_mask; ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, - (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, + (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_vxlan_gpe), MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) return ret; if (spec) { - if (spec->hdr.proto) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "VxLAN-GPE protocol" - " not supported"); memcpy(&id.vni[1], spec->hdr.vni, 3); memcpy(&id.vni[1], mask->hdr.vni, 3); } @@ -3906,6 +3905,45 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item, MLX5_ITEM_RANGE_NOT_ACCEPTED, error); } +/** + * Validate the NSH item. + * + * @param[in] dev + * Pointer to Ethernet device on which flow rule is being created on. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev, + const struct rte_flow_item *item, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (item->mask) { + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "NSH fields matching is not supported"); + } + + if (!priv->sh->config.dv_flow_en) { + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "NSH support requires DV flow interface"); + } + + if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_vxlan_gpe_nsh) { + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Current FW does not support matching on NSH"); + } + + return 0; +} + static int flow_null_validate(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_attr *attr __rte_unused, @@ -5853,7 +5891,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, "Failed to allocate meter flow id."); flow_id = tag_id - 1; flow_id_bits = (!flow_id) ? 1 : - (MLX5_REG_BITS - __builtin_clz(flow_id)); + (MLX5_REG_BITS - rte_clz32(flow_id)); if ((flow_id_bits + priv->sh->mtrmng->max_mtr_bits) > mtr_reg_bits) { mlx5_ipool_free(fm->flow_ipool, tag_id); diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 3a97975d69f..903ff66d72f 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -233,6 +233,9 @@ enum mlx5_feature_name { /* IB BTH ITEM. */ #define MLX5_FLOW_ITEM_IB_BTH (1ull << 51) +/* NSH ITEM */ +#define MLX5_FLOW_ITEM_NSH (1ull << 53) + /* Outer Masks. */ #define MLX5_FLOW_LAYER_OUTER_L3 \ (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6) @@ -331,6 +334,7 @@ enum mlx5_feature_name { #define MLX5_FLOW_FATE_ESWITCH_ACTIONS \ (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \ + MLX5_FLOW_ACTION_SEND_TO_KERNEL | \ MLX5_FLOW_ACTION_JUMP | MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) #define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \ @@ -2453,6 +2457,9 @@ int mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item, uint16_t ether_type, const struct rte_flow_item_ecpri *acc_mask, struct rte_flow_error *error); +int mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev, + const struct rte_flow_item *item, + struct rte_flow_error *error); int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev, struct mlx5_flow_meter_info *fm, uint32_t mtr_idx, diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index a8dd9920e6a..3dc2fe5c717 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -421,7 +421,7 @@ flow_dv_convert_modify_action(struct rte_flow_item *item, /* Deduce actual data width in bits from mask value. */ off_b = rte_bsf32(mask) + carry_b; size_b = sizeof(uint32_t) * CHAR_BIT - - off_b - __builtin_clz(mask); + off_b - rte_clz32(mask); } MLX5_ASSERT(size_b); actions[i] = (struct mlx5_modification_cmd) { @@ -1392,10 +1392,10 @@ mlx5_flow_item_field_width(struct rte_eth_dev *dev, case RTE_FLOW_FIELD_TAG: return 32; case RTE_FLOW_FIELD_MARK: - return __builtin_popcount(priv->sh->dv_mark_mask); + return rte_popcount32(priv->sh->dv_mark_mask); case RTE_FLOW_FIELD_META: return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ? - __builtin_popcount(priv->sh->dv_meta_mask) : 32; + rte_popcount32(priv->sh->dv_meta_mask) : 32; case RTE_FLOW_FIELD_POINTER: case RTE_FLOW_FIELD_VALUE: return inherit < 0 ? 
0 : inherit; @@ -1940,7 +1940,7 @@ mlx5_flow_field_id_to_modify_info case RTE_FLOW_FIELD_MARK: { uint32_t mark_mask = priv->sh->dv_mark_mask; - uint32_t mark_count = __builtin_popcount(mark_mask); + uint32_t mark_count = rte_popcount32(mark_mask); RTE_SET_USED(mark_count); MLX5_ASSERT(data->offset + width <= mark_count); int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, @@ -1961,7 +1961,7 @@ mlx5_flow_field_id_to_modify_info case RTE_FLOW_FIELD_META: { uint32_t meta_mask = priv->sh->dv_meta_mask; - uint32_t meta_count = __builtin_popcount(meta_mask); + uint32_t meta_count = rte_popcount32(meta_mask); RTE_SET_USED(meta_count); MLX5_ASSERT(data->offset + width <= meta_count); int reg = flow_dv_get_metadata_reg(dev, attr, error); @@ -2002,7 +2002,7 @@ mlx5_flow_field_id_to_modify_info case MLX5_RTE_FLOW_FIELD_META_REG: { uint32_t meta_mask = priv->sh->dv_meta_mask; - uint32_t meta_count = __builtin_popcount(meta_mask); + uint32_t meta_count = rte_popcount32(meta_mask); uint8_t reg = flow_tag_index_get(data); RTE_SET_USED(meta_count); @@ -7815,6 +7815,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, last_item = MLX5_FLOW_ITEM_IB_BTH; break; + case RTE_FLOW_ITEM_TYPE_NSH: + ret = mlx5_flow_validate_item_nsh(dev, items, error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_ITEM_NSH; + break; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, @@ -9720,7 +9726,9 @@ flow_dv_translate_item_vxlan_gpe(void *key, const struct rte_flow_item *item, v_protocol = vxlan_v->hdr.protocol; if (!m_protocol) { /* Force next protocol to ensure next headers parsing. */ - if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2) + if (pattern_flags & MLX5_FLOW_ITEM_NSH) + v_protocol = RTE_VXLAN_GPE_TYPE_NSH; + else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2) v_protocol = RTE_VXLAN_GPE_TYPE_ETH; else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) v_protocol = RTE_VXLAN_GPE_TYPE_IPV4; @@ -12724,17 +12732,22 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, static void * flow_dv_translate_action_send_to_kernel(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, struct rte_flow_error *error) { struct mlx5_flow_tbl_resource *tbl; struct mlx5_dev_ctx_shared *sh; uint32_t priority; void *action; + int ft_type; int ret; sh = MLX5_SH(dev); - if (sh->send_to_kernel_action.action) - return sh->send_to_kernel_action.action; + ft_type = (attr->ingress) ? MLX5DR_TABLE_TYPE_NIC_RX : + ((attr->transfer) ? 
MLX5DR_TABLE_TYPE_FDB : + MLX5DR_TABLE_TYPE_NIC_TX); + if (sh->send_to_kernel_action[ft_type].action) + return sh->send_to_kernel_action[ft_type].action; priority = mlx5_get_send_to_kernel_priority(dev); if (priority == (uint32_t)-1) { rte_flow_error_set(error, ENOTSUP, @@ -12742,7 +12755,7 @@ flow_dv_translate_action_send_to_kernel(struct rte_eth_dev *dev, "required priority is not available"); return NULL; } - tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, 0, + tbl = flow_dv_tbl_resource_get(dev, 0, attr->egress, attr->transfer, false, NULL, 0, 0, 0, error); if (!tbl) { rte_flow_error_set(error, ENODATA, @@ -12759,8 +12772,8 @@ flow_dv_translate_action_send_to_kernel(struct rte_eth_dev *dev, goto err; } MLX5_ASSERT(action); - sh->send_to_kernel_action.action = action; - sh->send_to_kernel_action.tbl = tbl; + sh->send_to_kernel_action[ft_type].action = action; + sh->send_to_kernel_action[ft_type].tbl = tbl; return action; err: flow_dv_tbl_resource_release(sh, tbl); @@ -13910,6 +13923,9 @@ flow_dv_translate_items(struct rte_eth_dev *dev, flow_dv_translate_item_ib_bth(key, items, tunnel, key_type); last_item = MLX5_FLOW_ITEM_IB_BTH; break; + case RTE_FLOW_ITEM_TYPE_NSH: + last_item = MLX5_FLOW_ITEM_NSH; + break; default: break; } @@ -14511,7 +14527,7 @@ flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL: dev_flow->dv.actions[actions_n] = - flow_dv_translate_action_send_to_kernel(dev, + flow_dv_translate_action_send_to_kernel(dev, attr, error); if (!dev_flow->dv.actions[actions_n]) return -rte_errno; diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c index 5395969eb07..6fcf654e4a7 100644 --- a/drivers/net/mlx5/mlx5_flow_hw.c +++ b/drivers/net/mlx5/mlx5_flow_hw.c @@ -1344,8 +1344,7 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue, aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ? ASO_METER_WAIT : ASO_METER_WAIT_ASYNC; aso_mtr->offset = mtr_id - 1; - aso_mtr->init_color = (meter_mark->color_mode) ? - meter_mark->init_color : RTE_COLOR_GREEN; + aso_mtr->init_color = fm->color_aware ? RTE_COLORS : RTE_COLOR_GREEN; /* Update ASO flow meter by wqe. */ if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr, &priv->mtr_bulk, user_data, push)) { @@ -1380,9 +1379,6 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev, /* Compile METER_MARK action */ acts[aso_mtr_pos].action = pool->action; acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset; - acts[aso_mtr_pos].aso_meter.init_color = - (enum mlx5dr_action_aso_meter_color) - rte_col_2_mlx5_col(aso_mtr->init_color); *index = aso_mtr->fm.meter_id; return 0; } @@ -1439,6 +1435,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, uint32_t ct_idx; int err; uint32_t target_grp = 0; + int table_type; flow_hw_modify_field_init(&mhdr, at); if (attr->transfer) @@ -1635,7 +1632,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, "Send to kernel action on root table is not supported in HW steering mode"); } action_pos = at->actions_off[actions - at->actions]; - acts->rule_acts[action_pos].action = priv->hw_send_to_kernel; + table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX : + ((attr->egress) ? 
MLX5DR_TABLE_TYPE_NIC_TX : + MLX5DR_TABLE_TYPE_FDB); + acts->rule_acts[action_pos].action = priv->hw_send_to_kernel[table_type]; break; case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: err = flow_hw_modify_field_compile(dev, attr, action_start, @@ -1770,6 +1770,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, } } if (mhdr.pos != UINT16_MAX) { + struct mlx5dr_action_mh_pattern pattern; uint32_t flags; uint32_t bulk_size; size_t mhdr_len; @@ -1791,14 +1792,17 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, } else { bulk_size = rte_log2_u32(table_attr->nb_flows); } + pattern.data = (__be64 *)acts->mhdr->mhdr_cmds; + pattern.sz = mhdr_len; acts->mhdr->action = mlx5dr_action_create_modify_header - (priv->dr_ctx, mhdr_len, (__be64 *)acts->mhdr->mhdr_cmds, + (priv->dr_ctx, 1, &pattern, bulk_size, flags); if (!acts->mhdr->action) goto err; acts->rule_acts[acts->mhdr->pos].action = acts->mhdr->action; } if (reformat_used) { + struct mlx5dr_action_reformat_header hdr; uint8_t buf[MLX5_ENCAP_MAX_LEN]; bool shared_rfmt = true; @@ -1822,9 +1826,12 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, acts->encap_decap->data_size = data_size; memcpy(acts->encap_decap->data, encap_data, data_size); } + + hdr.sz = data_size; + hdr.data = encap_data; acts->encap_decap->action = mlx5dr_action_create_reformat (priv->dr_ctx, refmt_type, - data_size, encap_data, + 1, &hdr, shared_rfmt ? 0 : rte_log2_u32(table_attr->nb_flows), mlx5_hw_act_flag[!!attr->group][type] | (shared_rfmt ? MLX5DR_ACTION_FLAG_SHARED : 0)); @@ -2068,9 +2075,6 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue, return -1; rule_act->action = pool->action; rule_act->aso_meter.offset = aso_mtr->offset; - rule_act->aso_meter.init_color = - (enum mlx5dr_action_aso_meter_color) - rte_col_2_mlx5_col(aso_mtr->init_color); break; case MLX5_INDIRECT_ACTION_TYPE_QUOTA: flow_hw_construct_quota(priv, rule_act, idx); @@ -2483,9 +2487,6 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, pool->action; rule_acts[act_data->action_dst].aso_meter.offset = aso_mtr->offset; - rule_acts[act_data->action_dst].aso_meter.init_color = - (enum mlx5dr_action_aso_meter_color) - rte_col_2_mlx5_col(aso_mtr->init_color); break; case RTE_FLOW_ACTION_TYPE_METER_MARK: /* @@ -4338,8 +4339,11 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, const struct rte_flow_action_count *count_mask = NULL; bool fixed_cnt = false; uint64_t action_flags = 0; - uint16_t i; bool actions_end = false; +#ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE + int table_type; +#endif + uint16_t i; int ret; /* FDB actions are only valid to proxy port. */ @@ -4390,7 +4394,10 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, action, "action not supported in guest port"); - if (!priv->hw_send_to_kernel) + table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX : + ((attr->egress) ? 
MLX5DR_TABLE_TYPE_NIC_TX : + MLX5DR_TABLE_TYPE_FDB); + if (!priv->hw_send_to_kernel[table_type]) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action, @@ -5948,13 +5955,19 @@ static void flow_hw_create_send_to_kernel_actions(struct mlx5_priv *priv __rte_unused) { #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE - priv->hw_send_to_kernel = - mlx5dr_action_create_dest_root(priv->dr_ctx, - MLX5_HW_LOWEST_PRIO_ROOT, - MLX5DR_ACTION_FLAG_HWS_RX); - if (!priv->hw_send_to_kernel) { - DRV_LOG(WARNING, "Unable to create HWS send to kernel action"); - return; + int action_flag; + int i; + + for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) { + action_flag = mlx5_hw_act_flag[1][i]; + priv->hw_send_to_kernel[i] = + mlx5dr_action_create_dest_root(priv->dr_ctx, + MLX5_HW_LOWEST_PRIO_ROOT, + action_flag); + if (!priv->hw_send_to_kernel[i]) { + DRV_LOG(WARNING, "Unable to create HWS send to kernel action"); + return; + } } #endif } @@ -5962,9 +5975,12 @@ flow_hw_create_send_to_kernel_actions(struct mlx5_priv *priv __rte_unused) static void flow_hw_destroy_send_to_kernel_action(struct mlx5_priv *priv) { - if (priv->hw_send_to_kernel) { - mlx5dr_action_destroy(priv->hw_send_to_kernel); - priv->hw_send_to_kernel = NULL; + int i; + for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) { + if (priv->hw_send_to_kernel[i]) { + mlx5dr_action_destroy(priv->hw_send_to_kernel[i]); + priv->hw_send_to_kernel[i] = NULL; + } } } @@ -6014,7 +6030,7 @@ flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev) * Availability of sufficient number of bits in REG_C_0 is verified on initialization. * Sanity checking here. */ - MLX5_ASSERT(__builtin_popcount(mask) >= __builtin_popcount(priv->vport_meta_mask)); + MLX5_ASSERT(rte_popcount32(mask) >= rte_popcount32(priv->vport_meta_mask)); return mask; } @@ -6082,7 +6098,7 @@ flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev, .src = { .field = RTE_FLOW_FIELD_VALUE, }, - .width = __builtin_popcount(tag_mask), + .width = rte_popcount32(tag_mask), }; struct rte_flow_action_modify_field set_tag_m = { .operation = RTE_FLOW_MODIFY_SET, @@ -6458,7 +6474,7 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev, .src = { .field = RTE_FLOW_FIELD_VALUE, }, - .width = __builtin_popcount(marker_mask), + .width = rte_popcount32(marker_mask), }; struct rte_flow_action_modify_field set_reg_m = { .operation = RTE_FLOW_MODIFY_SET, @@ -7846,7 +7862,7 @@ flow_hw_configure(struct rte_eth_dev *dev, goto err; } /* Initialize quotas */ - if (port_attr->nb_quotas) { + if (port_attr->nb_quotas || (host_priv && host_priv->quota_ctx.devx_obj)) { ret = mlx5_flow_quota_init(dev, port_attr->nb_quotas); if (ret) { rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -8659,6 +8675,45 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, return handle; } +static int +mlx5_flow_update_meter_mark(struct rte_eth_dev *dev, uint32_t queue, + const struct rte_flow_update_meter_mark *upd_meter_mark, + uint32_t idx, bool push, + struct mlx5_hw_q_job *job, struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_mtr_pool *pool = priv->hws_mpool; + const struct rte_flow_action_meter_mark *meter_mark = &upd_meter_mark->meter_mark; + struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(pool->idx_pool, idx); + struct mlx5_flow_meter_info *fm; + + if (!aso_mtr) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Invalid meter_mark 
update index"); + fm = &aso_mtr->fm; + if (upd_meter_mark->profile_valid) + fm->profile = (struct mlx5_flow_meter_profile *) + (meter_mark->profile); + if (upd_meter_mark->color_mode_valid) + fm->color_aware = meter_mark->color_mode; + if (upd_meter_mark->state_valid) + fm->is_enable = meter_mark->state; + /* Update ASO flow meter by wqe. */ + if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, + aso_mtr, &priv->mtr_bulk, job, push)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Unable to update ASO meter WQE"); + /* Wait for ASO object completion. */ + if (queue == MLX5_HW_INV_QUEUE && + mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Unable to wait for ASO meter CQE"); + return 0; +} + /** * Update shared action. * @@ -8689,15 +8744,9 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_aso_mtr_pool *pool = priv->hws_mpool; const struct rte_flow_modify_conntrack *ct_conf = (const struct rte_flow_modify_conntrack *)update; - const struct rte_flow_update_meter_mark *upd_meter_mark = - (const struct rte_flow_update_meter_mark *)update; - const struct rte_flow_action_meter_mark *meter_mark; struct mlx5_hw_q_job *job = NULL; - struct mlx5_aso_mtr *aso_mtr; - struct mlx5_flow_meter_info *fm; uint32_t act_idx = (uint32_t)(uintptr_t)handle; uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1); @@ -8724,44 +8773,8 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue, break; case MLX5_INDIRECT_ACTION_TYPE_METER_MARK: aso = true; - meter_mark = &upd_meter_mark->meter_mark; - /* Find ASO object. */ - aso_mtr = mlx5_ipool_get(pool->idx_pool, idx); - if (!aso_mtr) { - ret = -EINVAL; - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "Invalid meter_mark update index"); - break; - } - fm = &aso_mtr->fm; - if (upd_meter_mark->profile_valid) - fm->profile = (struct mlx5_flow_meter_profile *) - (meter_mark->profile); - if (upd_meter_mark->color_mode_valid) - fm->color_aware = meter_mark->color_mode; - if (upd_meter_mark->init_color_valid) - aso_mtr->init_color = (meter_mark->color_mode) ? - meter_mark->init_color : RTE_COLOR_GREEN; - if (upd_meter_mark->state_valid) - fm->is_enable = meter_mark->state; - /* Update ASO flow meter by wqe. */ - if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, - aso_mtr, &priv->mtr_bulk, job, push)) { - ret = -EINVAL; - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "Unable to update ASO meter WQE"); - break; - } - /* Wait for ASO object completion. 
*/ - if (queue == MLX5_HW_INV_QUEUE && - mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) { - ret = -EINVAL; - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "Unable to wait for ASO meter CQE"); - } + ret = mlx5_flow_update_meter_mark(dev, queue, update, idx, push, + job, error); break; case MLX5_INDIRECT_ACTION_TYPE_RSS: ret = flow_dv_action_update(dev, handle, update, error); diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c index ac8c3deaf0a..14a435d157c 100644 --- a/drivers/net/mlx5/mlx5_flow_meter.c +++ b/drivers/net/mlx5/mlx5_flow_meter.c @@ -1820,7 +1820,7 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id, legacy_fm->idx = mtr_idx; fm = &legacy_fm->fm; } - mtr_id_bits = MLX5_REG_BITS - __builtin_clz(mtr_idx); + mtr_id_bits = MLX5_REG_BITS - rte_clz32(mtr_idx); if ((mtr_id_bits + priv->sh->mtrmng->max_mtr_flow_bits) > mtr_reg_bits) { DRV_LOG(ERR, "Meter number exceeds max limit."); diff --git a/drivers/net/mlx5/mlx5_flow_quota.c b/drivers/net/mlx5/mlx5_flow_quota.c index 19e1835c971..14a2a8b9b4c 100644 --- a/drivers/net/mlx5/mlx5_flow_quota.c +++ b/drivers/net/mlx5/mlx5_flow_quota.c @@ -632,19 +632,22 @@ mlx5_flow_quota_destroy(struct rte_eth_dev *dev) struct mlx5_quota_ctx *qctx = &priv->quota_ctx; int ret; - if (qctx->quota_ipool) - mlx5_ipool_destroy(qctx->quota_ipool); - mlx5_quota_destroy_sq(priv); - mlx5_quota_destroy_read_buf(priv); if (qctx->dr_action) { ret = mlx5dr_action_destroy(qctx->dr_action); if (ret) DRV_LOG(ERR, "QUOTA: failed to destroy DR action"); } - if (qctx->devx_obj) { - ret = mlx5_devx_cmd_destroy(qctx->devx_obj); - if (ret) - DRV_LOG(ERR, "QUOTA: failed to destroy MTR ASO object"); + if (!priv->shared_host) { + if (qctx->quota_ipool) + mlx5_ipool_destroy(qctx->quota_ipool); + mlx5_quota_destroy_sq(priv); + mlx5_quota_destroy_read_buf(priv); + if (qctx->devx_obj) { + ret = mlx5_devx_cmd_destroy(qctx->devx_obj); + if (ret) - DRV_LOG(ERR, + "QUOTA: failed to destroy MTR ASO object"); + } } memset(qctx, 0, sizeof(*qctx)); return 0; @@ -652,14 +655,27 @@ mlx5_flow_quota_destroy(struct rte_eth_dev *dev) #define MLX5_QUOTA_IPOOL_TRUNK_SIZE (1u << 12) #define MLX5_QUOTA_IPOOL_CACHE_SIZE (1u << 13) -int -mlx5_flow_quota_init(struct rte_eth_dev *dev, uint32_t nb_quotas) + +static int +mlx5_quota_init_guest(struct mlx5_priv *priv) +{ + struct mlx5_quota_ctx *qctx = &priv->quota_ctx; + struct rte_eth_dev *host_dev = priv->shared_host; + struct mlx5_priv *host_priv = host_dev->data->dev_private; + + /** + * Shared quota object can be used in flow rules only. + * DR5 flow action needs access to ASO objects.
+ */ + qctx->devx_obj = host_priv->quota_ctx.devx_obj; + return 0; +} + +static int +mlx5_quota_init_host(struct mlx5_priv *priv, uint32_t nb_quotas) { - struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_quota_ctx *qctx = &priv->quota_ctx; - int reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, NULL); - uint32_t flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX; struct mlx5_indexed_pool_config quota_ipool_cfg = { .size = sizeof(struct mlx5_quota), .trunk_size = RTE_MIN(nb_quotas, MLX5_QUOTA_IPOOL_TRUNK_SIZE), @@ -680,32 +696,18 @@ mlx5_flow_quota_init(struct rte_eth_dev *dev, uint32_t nb_quotas) DRV_LOG(DEBUG, "QUOTA: no MTR support"); return -ENOTSUP; } - if (reg_id < 0) { - DRV_LOG(DEBUG, "QUOTA: MRT register not available"); - return -ENOTSUP; - } qctx->devx_obj = mlx5_devx_cmd_create_flow_meter_aso_obj (sh->cdev->ctx, sh->cdev->pdn, rte_log2_u32(nb_quotas >> 1)); if (!qctx->devx_obj) { DRV_LOG(DEBUG, "QUOTA: cannot allocate MTR ASO objects"); return -ENOMEM; } - if (sh->config.dv_esw_en && priv->master) - flags |= MLX5DR_ACTION_FLAG_HWS_FDB; - qctx->dr_action = mlx5dr_action_create_aso_meter - (priv->dr_ctx, (struct mlx5dr_devx_obj *)qctx->devx_obj, - reg_id - REG_C_0, flags); - if (!qctx->dr_action) { - DRV_LOG(DEBUG, "QUOTA: failed to create DR action"); - ret = -ENOMEM; - goto err; - } ret = mlx5_quota_alloc_read_buf(priv); if (ret) - goto err; + return ret; ret = mlx5_quota_alloc_sq(priv); if (ret) - goto err; + return ret; if (nb_quotas < MLX5_QUOTA_IPOOL_TRUNK_SIZE) quota_ipool_cfg.per_core_cache = 0; else if (nb_quotas < MLX5_HW_IPOOL_SIZE_THRESHOLD) @@ -715,10 +717,40 @@ mlx5_flow_quota_init(struct rte_eth_dev *dev, uint32_t nb_quotas) qctx->quota_ipool = mlx5_ipool_create(&quota_ipool_cfg); if (!qctx->quota_ipool) { DRV_LOG(DEBUG, "QUOTA: failed to allocate quota pool"); + return -ENOMEM; + } + return 0; +} + +int +mlx5_flow_quota_init(struct rte_eth_dev *dev, uint32_t nb_quotas) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_quota_ctx *qctx = &priv->quota_ctx; + uint32_t flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX; + int reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, NULL); + int ret; + + if (reg_id < 0) { + DRV_LOG(DEBUG, "QUOTA: MRT register not available"); + return -ENOTSUP; + } + if (!priv->shared_host) + ret = mlx5_quota_init_host(priv, nb_quotas); + else + ret = mlx5_quota_init_guest(priv); + if (ret) + goto err; + if (priv->sh->config.dv_esw_en && priv->master) + flags |= MLX5DR_ACTION_FLAG_HWS_FDB; + qctx->dr_action = mlx5dr_action_create_aso_meter + (priv->dr_ctx, (struct mlx5dr_devx_obj *)qctx->devx_obj, + reg_id - REG_C_0, flags); + if (!qctx->dr_action) { + DRV_LOG(DEBUG, "QUOTA: failed to create DR action"); ret = -ENOMEM; goto err; } - qctx->nb_quotas = nb_quotas; return 0; err: mlx5_flow_quota_destroy(dev); diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c index 18d80f34baa..f556a9fbcc7 100644 --- a/drivers/net/mlx5/mlx5_hws_cnt.c +++ b/drivers/net/mlx5/mlx5_hws_cnt.c @@ -9,6 +9,7 @@ #include #include #include +#include #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) @@ -286,7 +287,7 @@ mlx5_hws_cnt_raw_data_alloc(struct mlx5_dev_ctx_shared *sh, uint32_t n) return NULL; } -static void * +static uint32_t mlx5_hws_cnt_svc(void *opaque) { struct mlx5_dev_ctx_shared *sh = @@ -318,7 +319,7 @@ mlx5_hws_cnt_svc(void *opaque) if (interval > query_us) rte_delay_us_sleep(sleep_us); } - return NULL; +
return 0; } static void @@ -438,38 +439,37 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, int mlx5_hws_cnt_service_thread_create(struct mlx5_dev_ctx_shared *sh) { -#define CNT_THREAD_NAME_MAX 256 - char name[CNT_THREAD_NAME_MAX]; - rte_cpuset_t cpuset; + char name[RTE_THREAD_INTERNAL_NAME_SIZE]; + rte_thread_attr_t attr; int ret; uint32_t service_core = sh->cnt_svc->service_core; - CPU_ZERO(&cpuset); + ret = rte_thread_attr_init(&attr); + if (ret != 0) + goto error; + CPU_SET(service_core, &attr.cpuset); sh->cnt_svc->svc_running = 1; - ret = pthread_create(&sh->cnt_svc->service_thread, NULL, - mlx5_hws_cnt_svc, sh); - if (ret != 0) { - DRV_LOG(ERR, "Failed to create HW steering's counter service thread."); - return -ENOSYS; - } - snprintf(name, CNT_THREAD_NAME_MAX - 1, "%s/svc@%d", - sh->ibdev_name, service_core); - rte_thread_set_name((rte_thread_t){(uintptr_t)sh->cnt_svc->service_thread}, - name); - CPU_SET(service_core, &cpuset); - pthread_setaffinity_np(sh->cnt_svc->service_thread, sizeof(cpuset), - &cpuset); + ret = rte_thread_create(&sh->cnt_svc->service_thread, + &attr, mlx5_hws_cnt_svc, sh); + if (ret != 0) + goto error; + snprintf(name, sizeof(name), "mlx5-cn%d", service_core); + rte_thread_set_prefixed_name(sh->cnt_svc->service_thread, name); + return 0; +error: + DRV_LOG(ERR, "Failed to create HW steering's counter service thread."); + return ret; } void mlx5_hws_cnt_service_thread_destroy(struct mlx5_dev_ctx_shared *sh) { - if (sh->cnt_svc->service_thread == 0) + if (sh->cnt_svc->service_thread.opaque_id == 0) return; sh->cnt_svc->svc_running = 0; - pthread_join(sh->cnt_svc->service_thread, NULL); - sh->cnt_svc->service_thread = 0; + rte_thread_join(sh->cnt_svc->service_thread, NULL); + sh->cnt_svc->service_thread.opaque_id = 0; } static int diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h index 4d0d05c3764..cccfa7f2d31 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h @@ -1183,7 +1183,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, comp_idx = ((__vector unsigned long)comp_mask)[0]; /* F.3 get the first compressed CQE. */ - comp_idx = comp_idx ? __builtin_ctzll(comp_idx) / + comp_idx = comp_idx ? rte_ctz64(comp_idx) / (sizeof(uint16_t) * 8) : MLX5_VPMD_DESCS_PER_LOOP; /* E.6 mask out entries after the compressed CQE. */ @@ -1202,7 +1202,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, /* E.7 count non-compressed valid CQEs. */ n = ((__vector unsigned long)invalid_mask)[0]; - n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) : + n = n ? rte_ctz64(n) / (sizeof(uint16_t) * 8) : MLX5_VPMD_DESCS_PER_LOOP; nocmp_n += n; diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h index 0766952255e..2bdd1f676da 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h @@ -753,7 +753,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, comp_idx = _mm_cvtsi128_si64(comp_mask); /* F.3 get the first compressed CQE. */ comp_idx = comp_idx ? - __builtin_ctzll(comp_idx) / + rte_ctz64(comp_idx) / (sizeof(uint16_t) * 8) : MLX5_VPMD_DESCS_PER_LOOP; /* E.6 mask out entries after the compressed CQE. */ @@ -762,7 +762,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, invalid_mask = _mm_or_si128(invalid_mask, mask); /* E.7 count non-compressed valid CQEs. */ n = _mm_cvtsi128_si64(invalid_mask); - n = n ? 
__builtin_ctzll(n) / (sizeof(uint16_t) * 8) : + n = n ? rte_ctz64(n) / (sizeof(uint16_t) * 8) : MLX5_VPMD_DESCS_PER_LOOP; nocmp_n += n; /* D.2 get the final invalid mask. */ diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 8cb52b0f7d8..b584055fa84 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -1381,6 +1381,11 @@ int mlx5_map_aggr_tx_affinity(struct rte_eth_dev *dev, uint16_t tx_queue_id, struct mlx5_priv *priv; priv = dev->data->dev_private; + if (!mlx5_devx_obj_ops_en(priv->sh)) { + DRV_LOG(ERR, "Tx affinity mapping isn't supported by Verbs API."); + rte_errno = ENOTSUP; + return -rte_errno; + } txq = (*priv->txqs)[tx_queue_id]; if (!txq) return -1; diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c index b295702fd4a..4db738785f0 100644 --- a/drivers/net/mlx5/mlx5_utils.c +++ b/drivers/net/mlx5/mlx5_utils.c @@ -535,7 +535,7 @@ mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx) return NULL; } MLX5_ASSERT(slab); - iidx += __builtin_ctzll(slab); + iidx += rte_ctz64(slab); MLX5_ASSERT(iidx != UINT32_MAX); MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx)); rte_bitmap_clear(trunk->bmp, iidx); @@ -783,7 +783,7 @@ mlx5_ipool_get_next_cache(struct mlx5_indexed_pool *pool, uint32_t *pos) } return NULL; } - iidx += __builtin_ctzll(slab); + iidx += rte_ctz64(slab); rte_bitmap_clear(ibmp, iidx); iidx++; *pos = iidx; diff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c index d7953ac7cba..daa69e533ad 100644 --- a/drivers/net/mvneta/mvneta_ethdev.c +++ b/drivers/net/mvneta/mvneta_ethdev.c @@ -376,6 +376,10 @@ mvneta_dev_start(struct rte_eth_dev *dev) goto out; } + /* start rx queues */ + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + /* start tx queues */ for (i = 0; i < dev->data->nb_tx_queues; i++) dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; @@ -400,6 +404,7 @@ static int mvneta_dev_stop(struct rte_eth_dev *dev) { struct mvneta_priv *priv = dev->data->dev_private; + uint16_t i; dev->data->dev_started = 0; @@ -412,6 +417,14 @@ mvneta_dev_stop(struct rte_eth_dev *dev) priv->ppio = NULL; + /* stop rx queues */ + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + + /* stop tx queues */ + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c index 89c83f1c1f8..c12364941d6 100644 --- a/drivers/net/mvpp2/mrvl_ethdev.c +++ b/drivers/net/mvpp2/mrvl_ethdev.c @@ -193,7 +193,7 @@ static struct { static inline int mrvl_reserve_bit(int *bitmap, int max) { - int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap); + int n = sizeof(*bitmap) * 8 - rte_clz32(*bitmap); if (n >= max) return -1; @@ -951,6 +951,9 @@ mrvl_dev_start(struct rte_eth_dev *dev) goto out; } + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + mrvl_flow_init(dev); mrvl_mtr_init(dev); mrvl_set_tx_function(dev); @@ -1076,6 +1079,13 @@ mrvl_flush_bpool(struct rte_eth_dev *dev) static int mrvl_dev_stop(struct rte_eth_dev *dev) { + uint16_t i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 
mrvl_dev_set_link_down(dev); } diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c index d0bbc0a4c0c..b8a32832d71 100644 --- a/drivers/net/netvsc/hn_ethdev.c +++ b/drivers/net/netvsc/hn_ethdev.c @@ -990,7 +990,7 @@ static int hn_dev_start(struct rte_eth_dev *dev) { struct hn_data *hv = dev->data->dev_private; - int error; + int i, error; PMD_INIT_FUNC_TRACE(); @@ -1017,6 +1017,11 @@ hn_dev_start(struct rte_eth_dev *dev) if (error == 0) hn_dev_link_update(dev, 0); + for (i = 0; i < hv->num_queues; i++) { + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + return error; } @@ -1024,13 +1029,21 @@ static int hn_dev_stop(struct rte_eth_dev *dev) { struct hn_data *hv = dev->data->dev_private; + int i, ret; PMD_INIT_FUNC_TRACE(); dev->data->dev_started = 0; rte_dev_event_callback_unregister(NULL, netvsc_hotadd_callback, hv); hn_rndis_set_rxfilter(hv, 0); - return hn_vf_stop(dev); + ret = hn_vf_stop(dev); + + for (i = 0; i < hv->num_queues; i++) { + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + return ret; } static int @@ -1059,37 +1072,6 @@ hn_dev_close(struct rte_eth_dev *dev) return ret; } -static const struct eth_dev_ops hn_eth_dev_ops = { - .dev_configure = hn_dev_configure, - .dev_start = hn_dev_start, - .dev_stop = hn_dev_stop, - .dev_close = hn_dev_close, - .dev_infos_get = hn_dev_info_get, - .txq_info_get = hn_dev_tx_queue_info, - .rxq_info_get = hn_dev_rx_queue_info, - .dev_supported_ptypes_get = hn_vf_supported_ptypes, - .promiscuous_enable = hn_dev_promiscuous_enable, - .promiscuous_disable = hn_dev_promiscuous_disable, - .allmulticast_enable = hn_dev_allmulticast_enable, - .allmulticast_disable = hn_dev_allmulticast_disable, - .set_mc_addr_list = hn_dev_mc_addr_list, - .reta_update = hn_rss_reta_update, - .reta_query = hn_rss_reta_query, - .rss_hash_update = hn_rss_hash_update, - .rss_hash_conf_get = hn_rss_hash_conf_get, - .tx_queue_setup = hn_dev_tx_queue_setup, - .tx_queue_release = hn_dev_tx_queue_release, - .tx_done_cleanup = hn_dev_tx_done_cleanup, - .rx_queue_setup = hn_dev_rx_queue_setup, - .rx_queue_release = hn_dev_rx_queue_release, - .link_update = hn_dev_link_update, - .stats_get = hn_dev_stats_get, - .stats_reset = hn_dev_stats_reset, - .xstats_get = hn_dev_xstats_get, - .xstats_get_names = hn_dev_xstats_get_names, - .xstats_reset = hn_dev_xstats_reset, -}; - /* * Setup connection between PMD and kernel. */ @@ -1129,12 +1111,158 @@ hn_detach(struct hn_data *hv) hn_rndis_detach(hv); } +/* + * Connects EXISTING rx/tx queues to NEW vmbus channel(s), and + * re-initializes NDIS and RNDIS, including re-sending initial + * NDIS/RNDIS configuration. To be used after the underlying vmbus + * has been un- and re-mapped, e.g. as must happen when the device + * MTU is changed. 
+ */ +static int +hn_reinit(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct hn_data *hv = dev->data->dev_private; + struct hn_rx_queue **rxqs = (struct hn_rx_queue **)dev->data->rx_queues; + struct hn_tx_queue **txqs = (struct hn_tx_queue **)dev->data->tx_queues; + int i, ret = 0; + + /* Point primary queues at new primary channel */ + rxqs[0]->chan = hv->channels[0]; + txqs[0]->chan = hv->channels[0]; + + ret = hn_attach(hv, mtu); + if (ret) + return ret; + + /* Create vmbus subchannels, additional RNDIS configuration */ + ret = hn_dev_configure(dev); + if (ret) + return ret; + + /* Point any additional queues at new subchannels */ + for (i = 1; i < dev->data->nb_rx_queues; i++) + rxqs[i]->chan = hv->channels[i]; + for (i = 1; i < dev->data->nb_tx_queues; i++) + txqs[i]->chan = hv->channels[i]; + + return ret; +} + +static int +hn_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct hn_data *hv = dev->data->dev_private; + unsigned int orig_mtu = dev->data->mtu; + uint32_t rndis_mtu; + int ret = 0; + int i; + + if (dev->data->dev_started) { + PMD_DRV_LOG(ERR, "Device must be stopped before changing MTU"); + return -EBUSY; + } + + /* Change MTU of underlying VF dev first, if it exists */ + ret = hn_vf_mtu_set(dev, mtu); + if (ret) + return ret; + + /* Release channel resources */ + hn_detach(hv); + + /* Close any secondary vmbus channels */ + for (i = 1; i < hv->num_queues; i++) + rte_vmbus_chan_close(hv->channels[i]); + + /* Close primary vmbus channel */ + rte_free(hv->channels[0]); + + /* Unmap and re-map vmbus device */ + rte_vmbus_unmap_device(hv->vmbus); + ret = rte_vmbus_map_device(hv->vmbus); + if (ret) { + /* This is a catastrophic error - the device is unusable */ + PMD_DRV_LOG(ERR, "Could not re-map vmbus device!"); + return ret; + } + + /* Update pointers to re-mapped UIO resources */ + hv->rxbuf_res = hv->vmbus->resource[HV_RECV_BUF_MAP]; + hv->chim_res = hv->vmbus->resource[HV_SEND_BUF_MAP]; + + /* Re-open the primary vmbus channel */ + ret = rte_vmbus_chan_open(hv->vmbus, &hv->channels[0]); + if (ret) { + /* This is a catastrophic error - the device is unusable */ + PMD_DRV_LOG(ERR, "Could not re-open vmbus channel!"); + return ret; + } + + rte_vmbus_set_latency(hv->vmbus, hv->channels[0], hv->latency); + + ret = hn_reinit(dev, mtu); + if (!ret) + goto out; + + /* In case of error, attempt to restore original MTU */ + ret = hn_reinit(dev, orig_mtu); + if (ret) + PMD_DRV_LOG(ERR, "Restoring original MTU failed for netvsc"); + + ret = hn_vf_mtu_set(dev, orig_mtu); + if (ret) + PMD_DRV_LOG(ERR, "Restoring original MTU failed for VF"); + +out: + if (hn_rndis_get_mtu(hv, &rndis_mtu)) { + PMD_DRV_LOG(ERR, "Could not get MTU via RNDIS"); + } else { + dev->data->mtu = (uint16_t)rndis_mtu; + PMD_DRV_LOG(DEBUG, "RNDIS MTU is %u", dev->data->mtu); + } + + return ret; +} + +static const struct eth_dev_ops hn_eth_dev_ops = { + .dev_configure = hn_dev_configure, + .dev_start = hn_dev_start, + .dev_stop = hn_dev_stop, + .dev_close = hn_dev_close, + .dev_infos_get = hn_dev_info_get, + .txq_info_get = hn_dev_tx_queue_info, + .rxq_info_get = hn_dev_rx_queue_info, + .dev_supported_ptypes_get = hn_vf_supported_ptypes, + .promiscuous_enable = hn_dev_promiscuous_enable, + .promiscuous_disable = hn_dev_promiscuous_disable, + .allmulticast_enable = hn_dev_allmulticast_enable, + .allmulticast_disable = hn_dev_allmulticast_disable, + .set_mc_addr_list = hn_dev_mc_addr_list, + .mtu_set = hn_dev_mtu_set, + .reta_update = hn_rss_reta_update, + .reta_query = hn_rss_reta_query, + 
.rss_hash_update = hn_rss_hash_update, + .rss_hash_conf_get = hn_rss_hash_conf_get, + .tx_queue_setup = hn_dev_tx_queue_setup, + .tx_queue_release = hn_dev_tx_queue_release, + .tx_done_cleanup = hn_dev_tx_done_cleanup, + .rx_queue_setup = hn_dev_rx_queue_setup, + .rx_queue_release = hn_dev_rx_queue_release, + .link_update = hn_dev_link_update, + .stats_get = hn_dev_stats_get, + .stats_reset = hn_dev_stats_reset, + .xstats_get = hn_dev_xstats_get, + .xstats_get_names = hn_dev_xstats_get_names, + .xstats_reset = hn_dev_xstats_reset, +}; + static int eth_hn_dev_init(struct rte_eth_dev *eth_dev) { struct hn_data *hv = eth_dev->data->dev_private; struct rte_device *device = eth_dev->device; struct rte_vmbus_device *vmbus; + uint32_t mtu; unsigned int rxr_cnt; int err, max_chan; @@ -1218,6 +1346,12 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev) if (err) goto failed; + err = hn_rndis_get_mtu(hv, &mtu); + if (err) + goto failed; + eth_dev->data->mtu = (uint16_t)mtu; + PMD_INIT_LOG(DEBUG, "RNDIS MTU is %u", eth_dev->data->mtu); + err = hn_rndis_get_eaddr(hv, eth_dev->data->mac_addrs->addr_bytes); if (err) goto failed; @@ -1272,7 +1406,7 @@ eth_hn_dev_uninit(struct rte_eth_dev *eth_dev) hn_detach(hv); hn_chim_uninit(eth_dev); - rte_vmbus_chan_close(hv->primary->chan); + rte_vmbus_chan_close(hv->channels[0]); rte_free(hv->primary); ret = rte_eth_dev_owner_delete(hv->owner.id); if (ret != 0) diff --git a/drivers/net/netvsc/hn_rndis.c b/drivers/net/netvsc/hn_rndis.c index 29c6009b2cb..1ba75ee804a 100644 --- a/drivers/net/netvsc/hn_rndis.c +++ b/drivers/net/netvsc/hn_rndis.c @@ -35,7 +35,7 @@ #include "hn_rndis.h" #include "ndis.h" -#define RNDIS_TIMEOUT_SEC 5 +#define RNDIS_TIMEOUT_SEC 60 #define RNDIS_DELAY_MS 10 #define HN_RNDIS_XFER_SIZE 0x4000 @@ -1111,6 +1111,13 @@ hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr) return 0; } +int +hn_rndis_get_mtu(struct hn_data *hv, uint32_t *mtu) +{ + return hn_rndis_query(hv, OID_GEN_MAXIMUM_FRAME_SIZE, NULL, 0, + mtu, sizeof(uint32_t)); +} + int hn_rndis_get_linkstatus(struct hn_data *hv) { diff --git a/drivers/net/netvsc/hn_rndis.h b/drivers/net/netvsc/hn_rndis.h index 9a8251fc2fb..7f40f6221de 100644 --- a/drivers/net/netvsc/hn_rndis.h +++ b/drivers/net/netvsc/hn_rndis.h @@ -10,6 +10,7 @@ void hn_rndis_link_status(struct rte_eth_dev *dev, const void *msg); int hn_rndis_attach(struct hn_data *hv); void hn_rndis_detach(struct hn_data *hv); int hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr); +int hn_rndis_get_mtu(struct hn_data *hv, uint32_t *mtu); int hn_rndis_get_linkstatus(struct hn_data *hv); int hn_rndis_get_linkspeed(struct hn_data *hv); int hn_rndis_set_rxfilter(struct hn_data *hv, uint32_t filter); diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c index bc6f60c64ad..e4f5015aa3c 100644 --- a/drivers/net/netvsc/hn_rxtx.c +++ b/drivers/net/netvsc/hn_rxtx.c @@ -116,7 +116,7 @@ hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m) uint32_t bin; /* count zeros, and offset into correct bin */ - bin = (sizeof(s) * 8) - __builtin_clz(s) - 5; + bin = (sizeof(s) * 8) - rte_clz32(s) - 5; stats->size_bins[bin]++; } else { if (s < 64) diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h index e1f8e69a286..e37946804db 100644 --- a/drivers/net/netvsc/hn_var.h +++ b/drivers/net/netvsc/hn_var.h @@ -13,7 +13,7 @@ * Tunable ethdev params */ #define HN_MIN_RX_BUF_SIZE 1024 -#define HN_MAX_XFER_LEN 2048 +#define HN_MAX_XFER_LEN RTE_ETHER_MAX_JUMBO_FRAME_LEN #define HN_MAX_MAC_ADDRS 1 #define 
HN_MAX_CHANNELS 64 @@ -287,6 +287,7 @@ int hn_vf_rss_hash_update(struct rte_eth_dev *dev, int hn_vf_reta_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); +int hn_vf_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); int hn_eth_rmv_event_callback(uint16_t port_id, enum rte_eth_event_type event __rte_unused, void *cb_arg, void *out __rte_unused); diff --git a/drivers/net/netvsc/hn_vf.c b/drivers/net/netvsc/hn_vf.c index 782395d805e..90cb6f69236 100644 --- a/drivers/net/netvsc/hn_vf.c +++ b/drivers/net/netvsc/hn_vf.c @@ -239,7 +239,7 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv) port = hv->vf_ctx.vf_port; - /* If the primary device has started, this is a VF host add. + /* If the primary device has started, this is a VF hot add. * Configure and start VF device. */ if (dev->data->dev_started) { @@ -264,6 +264,12 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv) goto exit; } + ret = hn_vf_mtu_set(dev, dev->data->mtu); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to set VF MTU"); + goto exit; + } + PMD_DRV_LOG(NOTICE, "Starting VF port %d", port); ret = rte_eth_dev_start(port); if (ret) { @@ -778,3 +784,18 @@ int hn_vf_reta_hash_update(struct rte_eth_dev *dev, return ret; } + +int hn_vf_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + int ret = 0; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (hv->vf_ctx.vf_vsc_switched && vf_dev) + ret = vf_dev->dev_ops->mtu_set(vf_dev, mtu); + rte_rwlock_read_unlock(&hv->vf_lock); + + return ret; +} diff --git a/drivers/net/nfp/flower/nfp_conntrack.c b/drivers/net/nfp/flower/nfp_conntrack.c new file mode 100644 index 00000000000..f89003be8b4 --- /dev/null +++ b/drivers/net/nfp/flower/nfp_conntrack.c @@ -0,0 +1,1767 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Corigine, Inc. + * All rights reserved. + */ + +#include "nfp_conntrack.h" + +#include +#include +#include + +#include "../nfp_logs.h" +#include "nfp_flower_cmsg.h" +#include "nfp_flower_representor.h" + +struct ct_data { + uint8_t ct_state; /* Connection state. */ + uint16_t ct_zone; /* Connection zone. */ +}; + +enum ct_entry_type { + CT_TYPE_PRE_CT, + CT_TYPE_POST_CT, +}; + +struct nfp_initial_flow { + struct rte_flow_item *items; + struct rte_flow_action *actions; + uint8_t items_cnt; + uint8_t actions_cnt; +}; + +struct nfp_ct_flow_entry { + uint64_t cookie; + LIST_ENTRY(nfp_ct_flow_entry) pre_ct_list; + LIST_ENTRY(nfp_ct_flow_entry) post_ct_list; + LIST_HEAD(, nfp_ct_merge_entry) children; + enum ct_entry_type type; + struct nfp_flower_representor *repr; + struct nfp_ct_zone_entry *ze; + struct nfp_initial_flow rule; + struct nfp_fl_stats stats; +}; + +struct nfp_ct_map_entry { + uint64_t cookie; + struct nfp_ct_flow_entry *fe; +}; + +struct nfp_ct_zone_entry { + uint32_t zone; + struct nfp_flow_priv *priv; + LIST_HEAD(, nfp_ct_flow_entry) pre_ct_list; + LIST_HEAD(, nfp_ct_flow_entry) post_ct_list; + struct rte_hash *ct_merge_table; +}; + +struct nfp_ct_merge_entry { + uint64_t cookie[2]; + uint32_t ctx_id; + LIST_ENTRY(nfp_ct_merge_entry) pre_ct_list; + LIST_ENTRY(nfp_ct_merge_entry) post_ct_list; + struct nfp_initial_flow rule; + struct rte_flow *compiled_rule; + struct nfp_ct_zone_entry *ze; + struct nfp_ct_flow_entry *pre_ct_parent; + struct nfp_ct_flow_entry *post_ct_parent; +}; + +/* OVS_KEY_ATTR_CT_STATE flags */ +#define OVS_CS_F_NEW 0x01 /* Beginning of a new connection. 
*/ +#define OVS_CS_F_ESTABLISHED 0x02 /* Part of an existing connection. */ +#define OVS_CS_F_RELATED 0x04 /* Related to an established connection. */ +#define OVS_CS_F_REPLY_DIR 0x08 /* Flow is in the reply direction. */ +#define OVS_CS_F_INVALID 0x10 /* Could not track connection. */ +#define OVS_CS_F_TRACKED 0x20 /* Conntrack has occurred. */ +#define OVS_CS_F_SRC_NAT 0x40 /* Packet's source address/port was mangled by NAT. */ +#define OVS_CS_F_DST_NAT 0x80 /* Packet's destination address/port was mangled by NAT. */ + +typedef void (*nfp_action_free_fn)(void *field); +typedef bool (*nfp_action_copy_fn)(const void *src, void *dst); + +static bool +is_pre_ct_flow(const struct ct_data *ct, + const struct rte_flow_action *actions) +{ + const struct rte_flow_action *action; + + if (ct == NULL) + return false; + + for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) { + if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) + return true; + } + + return false; +} + +static bool +is_post_ct_flow(const struct ct_data *ct) +{ + if (ct == NULL) + return false; + + if ((ct->ct_state & OVS_CS_F_ESTABLISHED) != 0) + return true; + + return false; +} + +static bool +is_ct_commit_flow(const struct ct_data *ct) +{ + if (ct == NULL) + return false; + + if ((ct->ct_state & OVS_CS_F_NEW) != 0) + return true; + + return false; +} + +static struct nfp_ct_merge_entry * +nfp_ct_merge_table_search(struct nfp_ct_zone_entry *ze, + char *hash_data, + uint32_t hash_len) +{ + int index; + uint32_t hash_key; + struct nfp_ct_merge_entry *m_ent; + + hash_key = rte_jhash(hash_data, hash_len, ze->priv->hash_seed); + index = rte_hash_lookup_data(ze->ct_merge_table, &hash_key, (void **)&m_ent); + if (index < 0) { + PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_merge table"); + return NULL; + } + + return m_ent; +} + +static bool +nfp_ct_merge_table_add(struct nfp_ct_zone_entry *ze, + struct nfp_ct_merge_entry *merge_entry) +{ + int ret; + uint32_t hash_key; + + hash_key = rte_jhash(merge_entry, sizeof(uint64_t) * 2, ze->priv->hash_seed); + ret = rte_hash_add_key_data(ze->ct_merge_table, &hash_key, merge_entry); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Add to ct_merge table failed"); + return false; + } + + return true; +} + +static void +nfp_ct_merge_table_delete(struct nfp_ct_zone_entry *ze, + struct nfp_ct_merge_entry *m_ent) +{ + int ret; + uint32_t hash_key; + + hash_key = rte_jhash(m_ent, sizeof(uint64_t) * 2, ze->priv->hash_seed); + ret = rte_hash_del_key(ze->ct_merge_table, &hash_key); + if (ret < 0) + PMD_DRV_LOG(ERR, "Delete from ct_merge table failed, ret=%d", ret); +} + +static void +nfp_ct_merge_entry_destroy(struct nfp_ct_merge_entry *m_ent) +{ + struct nfp_ct_zone_entry *ze; + + ze = m_ent->ze; + nfp_ct_merge_table_delete(ze, m_ent); + + rte_free(m_ent->rule.actions); + rte_free(m_ent->rule.items); + LIST_REMOVE(m_ent, pre_ct_list); + LIST_REMOVE(m_ent, post_ct_list); + rte_free(m_ent); +} + +struct nfp_ct_map_entry * +nfp_ct_map_table_search(struct nfp_flow_priv *priv, + char *hash_data, + uint32_t hash_len) +{ + int index; + uint32_t hash_key; + struct nfp_ct_map_entry *me; + + hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed); + index = rte_hash_lookup_data(priv->ct_map_table, &hash_key, (void **)&me); + if (index < 0) { + PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_map table"); + return NULL; + } + + return me; +} + +static bool +nfp_ct_map_table_add(struct nfp_flow_priv *priv, + struct nfp_ct_map_entry *me) +{ + int ret; + uint32_t hash_key; + + hash_key = rte_jhash(me, 
sizeof(uint64_t), priv->hash_seed); + ret = rte_hash_add_key_data(priv->ct_map_table, &hash_key, me); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Add to ct_map table failed"); + return false; + } + + return true; +} + +static void +nfp_ct_map_table_delete(struct nfp_flow_priv *priv, + struct nfp_ct_map_entry *me) +{ + int ret; + uint32_t hash_key; + + hash_key = rte_jhash(me, sizeof(uint64_t), priv->hash_seed); + ret = rte_hash_del_key(priv->ct_map_table, &hash_key); + if (ret < 0) + PMD_DRV_LOG(ERR, "Delete form ct_map table failed"); +} + +static void +nfp_ct_map_entry_destroy(struct nfp_ct_map_entry *me) +{ + rte_free(me); +} + +static void +nfp_ct_flow_item_free_real(void *field, + enum rte_flow_item_type type) +{ + switch (type) { + case RTE_FLOW_ITEM_TYPE_VOID: + break; + case RTE_FLOW_ITEM_TYPE_ETH: /* FALLTHROUGH */ + case RTE_FLOW_ITEM_TYPE_VLAN: /* FALLTHROUGH */ + case RTE_FLOW_ITEM_TYPE_IPV4: /* FALLTHROUGH */ + case RTE_FLOW_ITEM_TYPE_IPV6: /* FALLTHROUGH */ + case RTE_FLOW_ITEM_TYPE_TCP: /* FALLTHROUGH */ + case RTE_FLOW_ITEM_TYPE_UDP: /* FALLTHROUGH */ + case RTE_FLOW_ITEM_TYPE_SCTP: /* FALLTHROUGH */ + case RTE_FLOW_ITEM_TYPE_VXLAN: /* FALLTHROUGH */ + case RTE_FLOW_ITEM_TYPE_GRE: /* FALLTHROUGH */ + case RTE_FLOW_ITEM_TYPE_GRE_KEY: /* FALLTHROUGH */ + case RTE_FLOW_ITEM_TYPE_GENEVE: + rte_free(field); + break; + default: + break; + } +} + +static void +nfp_ct_flow_item_free(struct rte_flow_item *item) +{ + if (item->spec != NULL) + nfp_ct_flow_item_free_real((void *)(ptrdiff_t)item->spec, item->type); + + if (item->mask != NULL) + nfp_ct_flow_item_free_real((void *)(ptrdiff_t)item->mask, item->type); + + if (item->last != NULL) + nfp_ct_flow_item_free_real((void *)(ptrdiff_t)item->last, item->type); +} + +static void +nfp_ct_flow_items_free(struct rte_flow_item *items, + uint8_t item_cnt) +{ + uint8_t loop; + + for (loop = 0; loop < item_cnt; ++loop) + nfp_ct_flow_item_free(items + loop); +} + +static bool +nfp_flow_item_conf_size_get(enum rte_flow_item_type type, + size_t *size) +{ + size_t len = 0; + + switch (type) { + case RTE_FLOW_ITEM_TYPE_VOID: + break; + case RTE_FLOW_ITEM_TYPE_ETH: + len = sizeof(struct rte_flow_item_eth); + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + len = sizeof(struct rte_flow_item_vlan); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + len = sizeof(struct rte_flow_item_ipv4); + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + len = sizeof(struct rte_flow_item_ipv6); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + len = sizeof(struct rte_flow_item_tcp); + break; + case RTE_FLOW_ITEM_TYPE_UDP: + len = sizeof(struct rte_flow_item_udp); + break; + case RTE_FLOW_ITEM_TYPE_SCTP: + len = sizeof(struct rte_flow_item_sctp); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + len = sizeof(struct rte_flow_item_vxlan); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + len = sizeof(struct rte_flow_item_gre); + break; + case RTE_FLOW_ITEM_TYPE_GRE_KEY: + len = sizeof(rte_be32_t); + break; + case RTE_FLOW_ITEM_TYPE_GENEVE: + len = sizeof(struct rte_flow_item_geneve); + break; + default: + PMD_DRV_LOG(ERR, "Unsupported item type: %d", type); + *size = 0; + return false; + } + + *size = len; + + return true; +} + +static void * +nfp_ct_flow_item_copy_real(const void *src, + enum rte_flow_item_type type) +{ + bool ret; + void *dst; + size_t len; + + ret = nfp_flow_item_conf_size_get(type, &len); + if (!ret) { + PMD_DRV_LOG(ERR, "Get flow item conf size failed"); + return NULL; + } + + dst = rte_zmalloc("flow_item", len, 0); + if (dst == NULL) { + PMD_DRV_LOG(ERR, "Malloc memory for ct item failed"); + 
return NULL; + } + + rte_memcpy(dst, src, len); + + return dst; +} + +static bool +nfp_ct_flow_item_copy(const struct rte_flow_item *src, + struct rte_flow_item *dst) +{ + dst->type = src->type; + + if (src->spec != NULL) { + dst->spec = nfp_ct_flow_item_copy_real(src->spec, src->type); + if (dst->spec == NULL) { + PMD_DRV_LOG(ERR, "Copy spec of ct item failed"); + goto end; + } + } + + if (src->mask != NULL) { + dst->mask = nfp_ct_flow_item_copy_real(src->mask, src->type); + if (dst->mask == NULL) { + PMD_DRV_LOG(ERR, "Copy mask of ct item failed"); + goto free_spec; + } + } + + if (src->last != NULL) { + dst->last = nfp_ct_flow_item_copy_real(src->last, src->type); + if (dst->last == NULL) { + PMD_DRV_LOG(ERR, "Copy last of ct item failed"); + goto free_mask; + } + } + + return true; + +free_mask: + nfp_ct_flow_item_free_real((void *)(ptrdiff_t)dst->mask, dst->type); +free_spec: + nfp_ct_flow_item_free_real((void *)(ptrdiff_t)dst->spec, dst->type); +end: + return false; +} + +static bool +nfp_ct_flow_items_copy(const struct rte_flow_item *src, + struct rte_flow_item *dst, + uint8_t item_cnt) +{ + bool ret; + uint8_t loop; + + for (loop = 0; loop < item_cnt; ++loop) { + ret = nfp_ct_flow_item_copy(src + loop, dst + loop); + if (!ret) { + PMD_DRV_LOG(ERR, "Copy ct item failed"); + nfp_ct_flow_items_free(dst, loop); + return false; + } + } + + return true; +} + +static void +nfp_ct_flow_action_free_real(void *field, + nfp_action_free_fn func) +{ + if (func != NULL) + func(field); + + rte_free(field); +} + +static void +nfp_ct_flow_action_free_vxlan(void *field) +{ + struct vxlan_data *vxlan = field; + + nfp_ct_flow_items_free(vxlan->items, ACTION_VXLAN_ENCAP_ITEMS_NUM); +} + +static void +nfp_ct_flow_action_free_raw(void *field) +{ + struct rte_flow_action_raw_encap *raw_encap = field; + + rte_free(raw_encap->data); +} + +static void +nfp_ct_flow_action_free(struct rte_flow_action *action) +{ + nfp_action_free_fn func = NULL; + + if (action->conf == NULL) + return; + + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_VOID: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_DROP: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_COUNT: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_JUMP: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + return; + case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_PORT_ID: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_TTL: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_TP_DST: + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + func = nfp_ct_flow_action_free_vxlan; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + func = nfp_ct_flow_action_free_raw; + break; + default: + PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type); + break; + } + + nfp_ct_flow_action_free_real((void *)(ptrdiff_t)action->conf, func); +} + +static void +nfp_ct_flow_actions_free(struct 
rte_flow_action *actions, + uint8_t action_cnt) +{ + uint8_t loop; + + for (loop = 0; loop < action_cnt; ++loop) + nfp_ct_flow_action_free(actions + loop); +} + +static void * +nfp_ct_flow_action_copy_real(const void *src, + size_t len, + nfp_action_copy_fn func) +{ + bool ret; + void *dst; + + dst = rte_zmalloc("flow_action", len, 0); + if (dst == NULL) { + PMD_DRV_LOG(ERR, "Malloc memory for ct action failed"); + return NULL; + } + + if (func != NULL) { + ret = func(src, dst); + if (!ret) { + PMD_DRV_LOG(ERR, "Copy ct action failed"); + return NULL; + } + + return dst; + } + + rte_memcpy(dst, src, len); + + return dst; +} + +static bool +nfp_ct_flow_action_copy_vxlan(const void *src, + void *dst) +{ + struct vxlan_data *vxlan_dst = dst; + const struct vxlan_data *vxlan_src = src; + + vxlan_dst->conf.definition = vxlan_dst->items; + return nfp_ct_flow_items_copy(vxlan_src->items, vxlan_dst->items, + ACTION_VXLAN_ENCAP_ITEMS_NUM); +} + +static bool +nfp_ct_flow_action_copy_raw(const void *src, + void *dst) +{ + struct rte_flow_action_raw_encap *raw_dst = dst; + const struct rte_flow_action_raw_encap *raw_src = src; + + raw_dst->size = raw_src->size; + raw_dst->data = nfp_ct_flow_action_copy_real(raw_src->data, + raw_src->size, NULL); + if (raw_dst->data == NULL) { + PMD_DRV_LOG(ERR, "Copy ct action process failed"); + return false; + } + + return true; +} + +static bool +nfp_ct_flow_action_copy(const struct rte_flow_action *src, + struct rte_flow_action *dst) +{ + size_t len; + nfp_action_copy_fn func = NULL; + + dst->type = src->type; + + if (src->conf == NULL) + return true; + + switch (src->type) { + case RTE_FLOW_ACTION_TYPE_VOID: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_DROP: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_COUNT: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_JUMP: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + return true; + case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: + len = sizeof(struct rte_flow_action_set_mac); + break; + case RTE_FLOW_ACTION_TYPE_PORT_ID: + len = sizeof(struct rte_flow_action_port_id); + break; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + len = sizeof(struct rte_flow_action_of_push_vlan); + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: + len = sizeof(struct rte_flow_action_set_ipv4); + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: + len = sizeof(struct rte_flow_action_set_dscp); + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: + len = sizeof(struct rte_flow_action_set_ipv6); + break; + case RTE_FLOW_ACTION_TYPE_SET_TTL: + len = sizeof(struct rte_flow_action_set_ttl); + break; + case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_TP_DST: + len = sizeof(struct rte_flow_action_set_tp); + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + len = sizeof(struct vxlan_data); + func = nfp_ct_flow_action_copy_vxlan; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + len = sizeof(struct rte_flow_action_raw_encap); + func = nfp_ct_flow_action_copy_raw; + break; + default: + PMD_DRV_LOG(DEBUG, "Unsupported action type: %d", src->type); + return false; + } + + dst->conf = nfp_ct_flow_action_copy_real(src->conf, len, func); + if (dst->conf == NULL) { + 
PMD_DRV_LOG(DEBUG, "Copy ct action process failed"); + return false; + } + + return true; +} + +static bool +nfp_ct_flow_actions_copy(const struct rte_flow_action *src, + struct rte_flow_action *dst, + uint8_t action_cnt) +{ + bool ret; + uint8_t loop; + + for (loop = 0; loop < action_cnt; ++loop) { + ret = nfp_ct_flow_action_copy(src + loop, dst + loop); + if (!ret) { + PMD_DRV_LOG(DEBUG, "Copy ct action failed"); + nfp_ct_flow_actions_free(dst, loop); + return false; + } + } + + return true; +} + +static struct nfp_ct_flow_entry * +nfp_ct_flow_entry_get(struct nfp_ct_zone_entry *ze, + struct nfp_flower_representor *repr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + uint64_t cookie) +{ + bool ret; + uint8_t loop; + uint8_t item_cnt = 1; /* The RTE_FLOW_ITEM_TYPE_END */ + uint8_t action_cnt = 1; /* The RTE_FLOW_ACTION_TYPE_END */ + struct nfp_flow_priv *priv; + struct nfp_ct_map_entry *me; + struct nfp_ct_flow_entry *fe; + + fe = rte_zmalloc("ct_flow_entry", sizeof(*fe), 0); + if (fe == NULL) { + PMD_DRV_LOG(ERR, "Could not alloc ct_flow entry"); + return NULL; + } + + fe->ze = ze; + fe->repr = repr; + fe->cookie = cookie; + LIST_INIT(&fe->children); + + for (loop = 0; (items + loop)->type != RTE_FLOW_ITEM_TYPE_END; loop++) + item_cnt++; + for (loop = 0; (actions + loop)->type != RTE_FLOW_ACTION_TYPE_END; loop++) + action_cnt++; + + fe->rule.items = rte_zmalloc("ct_flow_item", + sizeof(struct rte_flow_item) * item_cnt, 0); + if (fe->rule.items == NULL) { + PMD_DRV_LOG(ERR, "Could not alloc ct flow items"); + goto free_flow_entry; + } + + fe->rule.actions = rte_zmalloc("ct_flow_action", + sizeof(struct rte_flow_action) * action_cnt, 0); + if (fe->rule.actions == NULL) { + PMD_DRV_LOG(ERR, "Could not alloc ct flow actions"); + goto free_flow_item; + } + + /* Deep copy of items */ + ret = nfp_ct_flow_items_copy(items, fe->rule.items, item_cnt); + if (!ret) { + PMD_DRV_LOG(ERR, "Could not deep copy ct flow items"); + goto free_flow_action; + } + + /* Deep copy of actions */ + ret = nfp_ct_flow_actions_copy(actions, fe->rule.actions, action_cnt); + if (!ret) { + PMD_DRV_LOG(ERR, "Could not deep copy ct flow actions"); + goto free_copied_items; + } + + fe->rule.items_cnt = item_cnt; + fe->rule.actions_cnt = action_cnt; + + /* Now add a ct map entry */ + me = rte_zmalloc("ct_map_entry", sizeof(*me), 0); + if (me == NULL) { + PMD_DRV_LOG(ERR, "Malloc memory for ct map entry failed"); + goto free_copied_actions; + } + + me->cookie = fe->cookie; + me->fe = fe; + + priv = repr->app_fw_flower->flow_priv; + ret = nfp_ct_map_table_add(priv, me); + if (!ret) { + PMD_DRV_LOG(ERR, "Add into ct map table failed"); + goto free_map_entry; + } + + return fe; + +free_map_entry: + nfp_ct_map_entry_destroy(me); +free_copied_actions: + nfp_ct_flow_actions_free(fe->rule.actions, action_cnt); +free_copied_items: + nfp_ct_flow_items_free(fe->rule.items, item_cnt); +free_flow_action: + rte_free(fe->rule.actions); +free_flow_item: + rte_free(fe->rule.items); +free_flow_entry: + rte_free(fe); + + return NULL; +} + +static void +nfp_flow_children_merge_free(struct nfp_ct_flow_entry *fe) +{ + struct nfp_ct_merge_entry *m_ent; + + switch (fe->type) { + case CT_TYPE_PRE_CT: + LIST_FOREACH(m_ent, &fe->children, pre_ct_list) + nfp_ct_merge_entry_destroy(m_ent); + break; + case CT_TYPE_POST_CT: + LIST_FOREACH(m_ent, &fe->children, post_ct_list) + nfp_ct_merge_entry_destroy(m_ent); + break; + default: + break; + } +} + +static void +nfp_ct_flow_entry_destroy_partly(struct nfp_ct_flow_entry 
*fe) +{ + struct nfp_ct_map_entry *me; + + if (!LIST_EMPTY(&fe->children)) + nfp_flow_children_merge_free(fe); + + me = nfp_ct_map_table_search(fe->ze->priv, (char *)&fe->cookie, sizeof(uint64_t)); + if (me != NULL) { + nfp_ct_map_table_delete(fe->ze->priv, me); + nfp_ct_map_entry_destroy(me); + } + + nfp_ct_flow_actions_free(fe->rule.actions, fe->rule.actions_cnt); + nfp_ct_flow_items_free(fe->rule.items, fe->rule.items_cnt); + rte_free(fe->rule.actions); + rte_free(fe->rule.items); + rte_free(fe); +} + +static void +nfp_ct_flow_entry_destroy(struct nfp_ct_flow_entry *fe) +{ + LIST_REMOVE(fe, pre_ct_list); + LIST_REMOVE(fe, post_ct_list); + + nfp_ct_flow_entry_destroy_partly(fe); +} + +static struct nfp_ct_zone_entry * +nfp_ct_zone_table_search(struct nfp_flow_priv *priv, + char *hash_data, + uint32_t hash_len) +{ + int index; + uint32_t hash_key; + struct nfp_ct_zone_entry *ze; + + hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed); + index = rte_hash_lookup_data(priv->ct_zone_table, &hash_key, (void **)&ze); + if (index < 0) { + PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_zone table"); + return NULL; + } + + return ze; +} + +static bool +nfp_ct_zone_table_add(struct nfp_flow_priv *priv, + struct nfp_ct_zone_entry *ze) +{ + int ret; + uint32_t hash_key; + + hash_key = rte_jhash(ze, sizeof(uint32_t), priv->hash_seed); + ret = rte_hash_add_key_data(priv->ct_zone_table, &hash_key, ze); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Add to the ct_zone table failed"); + return false; + } + + return true; +} + +static void +nfp_ct_zone_table_delete(struct nfp_flow_priv *priv, + struct nfp_ct_zone_entry *ze) +{ + int ret; + uint32_t hash_key; + + hash_key = rte_jhash(ze, sizeof(uint32_t), priv->hash_seed); + ret = rte_hash_del_key(priv->ct_zone_table, &hash_key); + if (ret < 0) + PMD_DRV_LOG(ERR, "Delete from the ct_zone table failed"); +} + +static bool +nfp_ct_zone_entry_init(struct nfp_ct_zone_entry *ze, + struct nfp_flow_priv *priv, + uint32_t zone, + bool wildcard) +{ + char hash_name[RTE_HASH_NAMESIZE]; + struct rte_hash_parameters ct_merge_hash_params = { + .entries = 1000, + .hash_func = rte_jhash, + .socket_id = rte_socket_id(), + .key_len = sizeof(uint32_t), + .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY, + }; + + if (wildcard) { + ct_merge_hash_params.name = "ct_wc_merge_table"; + } else { + snprintf(hash_name, sizeof(hash_name), "ct_%d_merge_table", ze->zone); + ct_merge_hash_params.name = hash_name; + } + + ct_merge_hash_params.hash_func_init_val = priv->hash_seed; + ze->ct_merge_table = rte_hash_create(&ct_merge_hash_params); + if (ze->ct_merge_table == NULL) { + PMD_DRV_LOG(ERR, "ct merge table creation failed"); + return false; + } + + ze->zone = zone; + ze->priv = priv; + LIST_INIT(&ze->pre_ct_list); + LIST_INIT(&ze->post_ct_list); + + return true; +} + +static void +nfp_ct_zone_entry_destroy(struct nfp_ct_zone_entry *ze) +{ + struct nfp_ct_flow_entry *fe; + + if (ze == NULL) + return; + + rte_hash_free(ze->ct_merge_table); + + LIST_FOREACH(fe, &ze->pre_ct_list, pre_ct_list) + nfp_ct_flow_entry_destroy(fe); + + LIST_FOREACH(fe, &ze->post_ct_list, post_ct_list) + nfp_ct_flow_entry_destroy(fe); + + rte_free(ze); +} + +static struct nfp_ct_zone_entry * +nfp_ct_zone_entry_get(struct nfp_flow_priv *priv, + uint32_t zone, + bool wildcard) +{ + bool is_ok; + struct nfp_ct_zone_entry *ze; + + if (wildcard) { + if (priv->ct_zone_wc != NULL) + return priv->ct_zone_wc; + + ze = rte_zmalloc("ct_zone_wc", sizeof(*ze), 0); + if (ze == NULL) { + PMD_DRV_LOG(ERR, "Could not alloc 
ct_zone_wc entry"); + return NULL; + } + + is_ok = nfp_ct_zone_entry_init(ze, priv, zone, true); + if (!is_ok) { + PMD_DRV_LOG(ERR, "Init ct zone wc entry failed"); + goto free_ct_zone_entry; + } + + priv->ct_zone_wc = ze; + } else { + ze = nfp_ct_zone_table_search(priv, (char *)&zone, sizeof(uint32_t)); + if (ze != NULL) + return ze; + + ze = rte_zmalloc("ct_zone_entry", sizeof(*ze), 0); + if (ze == NULL) { + PMD_DRV_LOG(ERR, "Could not alloc ct_zone entry"); + return NULL; + } + + is_ok = nfp_ct_zone_entry_init(ze, priv, zone, false); + if (!is_ok) { + PMD_DRV_LOG(ERR, "Init ct zone entry failed"); + goto free_ct_zone_entry; + } + + is_ok = nfp_ct_zone_table_add(priv, ze); + if (!is_ok) { + PMD_DRV_LOG(ERR, "Add into ct zone table failed"); + goto free_ct_zone_entry; + } + } + + return ze; + +free_ct_zone_entry: + nfp_ct_zone_entry_destroy(ze); + + return NULL; +} + +static void +nfp_ct_zone_entry_free(struct nfp_ct_zone_entry *ze, + bool wildcard) +{ + if (LIST_EMPTY(&ze->pre_ct_list) && LIST_EMPTY(&ze->post_ct_list)) { + if (!wildcard) + nfp_ct_zone_table_delete(ze->priv, ze); + + nfp_ct_zone_entry_destroy(ze); + } +} + +static int +nfp_ct_offload_add(struct nfp_flower_representor *repr, + struct nfp_ct_merge_entry *merge_entry) +{ + int ret; + uint64_t cookie; + struct rte_flow *nfp_flow; + struct nfp_flow_priv *priv; + const struct rte_flow_item *items; + const struct rte_flow_action *actions; + + cookie = rte_rand(); + items = merge_entry->rule.items; + actions = merge_entry->rule.actions; + nfp_flow = nfp_flow_process(repr, items, actions, false, cookie, true, true); + if (nfp_flow == NULL) { + PMD_DRV_LOG(ERR, "Process the merged flow rule failed."); + return -EINVAL; + } + + merge_entry->ctx_id = rte_be_to_cpu_32(nfp_flow->payload.meta->host_ctx_id); + + /* Add the flow to hardware */ + priv = repr->app_fw_flower->flow_priv; + ret = nfp_flower_cmsg_flow_add(repr->app_fw_flower, nfp_flow); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Add the merged flow to firmware failed."); + goto flow_teardown; + } + + /* Add the flow to flow hash table */ + ret = nfp_flow_table_add_merge(priv, nfp_flow); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Add the merged flow to flow table failed."); + goto flow_teardown; + } + + merge_entry->compiled_rule = nfp_flow; + + return 0; + +flow_teardown: + nfp_flow_teardown(priv, nfp_flow, false); + nfp_flow_free(nfp_flow); + + return ret; +} + +int +nfp_ct_offload_del(struct rte_eth_dev *dev, + struct nfp_ct_map_entry *me, + struct rte_flow_error *error) +{ + int ret; + struct nfp_ct_flow_entry *fe; + struct nfp_ct_merge_entry *m_ent; + + fe = me->fe; + + if (fe->type == CT_TYPE_PRE_CT) { + LIST_FOREACH(m_ent, &fe->children, pre_ct_list) { + if (m_ent->compiled_rule != NULL) { + ret = nfp_flow_destroy(dev, m_ent->compiled_rule, error); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Could not alloc ct_flow_item"); + return -EINVAL; + } + m_ent->compiled_rule = NULL; + } + + m_ent->pre_ct_parent = NULL; + LIST_REMOVE(m_ent, pre_ct_list); + if (m_ent->post_ct_parent == NULL) + nfp_ct_merge_entry_destroy(m_ent); + } + } else { + LIST_FOREACH(m_ent, &fe->children, post_ct_list) { + if (m_ent->compiled_rule != NULL) { + ret = nfp_flow_destroy(dev, m_ent->compiled_rule, error); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Could not alloc ct_flow_item"); + return -EINVAL; + } + m_ent->compiled_rule = NULL; + } + + m_ent->post_ct_parent = NULL; + LIST_REMOVE(m_ent, post_ct_list); + if (m_ent->pre_ct_parent == NULL) + nfp_ct_merge_entry_destroy(m_ent); + } + } + + 
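/*
 * A merged rule sits on the children list of both of its parents:
 *
 *   pre-ct fe.children --pre_ct_list--> m_ent <--post_ct_list-- post-ct fe.children
 *
 * The loop above removes each child's compiled rule from hardware and
 * unlinks the child from this parent; a child whose other parent is still
 * present survives until that parent is deleted as well. Only the
 * pre/post-ct flow entry itself is left to release below.
 */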
nfp_ct_flow_entry_destroy_partly(fe); + + return 0; +} + +static inline bool +is_item_check_pass(const struct rte_flow_item *item1, + const struct rte_flow_item *item2, + uint8_t *cnt_same) +{ + bool pass; + uint32_t i; + size_t size; + const char *key1 = item1->spec; + const char *key2 = item2->spec; + const char *mask1 = item1->mask; + const char *mask2 = item2->mask; + + if (item1->type != item2->type) + return true; + + pass = nfp_flow_item_conf_size_get(item1->type, &size); + if (!pass) + return false; + + for (i = 0; i < size; i++) { + if ((key1[i] & mask1[i] & mask2[i]) ^ (key2[i] & mask1[i] & mask2[i])) + return false; + } + + *cnt_same = *cnt_same + 1; + + return true; +} + +static bool +nfp_ct_merge_items_check(struct rte_flow_item *items1, + struct rte_flow_item *items2, + uint8_t *cnt_same) +{ + bool pass; + bool is_tun_flow_1; + bool is_tun_flow_2; + const struct rte_flow_item *item1; + const struct rte_flow_item *item2; + const struct rte_flow_item *inner_item1 = NULL; + const struct rte_flow_item *inner_item2 = NULL; + + is_tun_flow_1 = nfp_flow_inner_item_get(items1, &inner_item1); + is_tun_flow_2 = nfp_flow_inner_item_get(items2, &inner_item2); + + if (is_tun_flow_1) { + if (is_tun_flow_2) { + /* Outer layer */ + for (item1 = items1; item1 != inner_item1; item1++) { + for (item2 = items2; item2 != inner_item2; item2++) { + pass = is_item_check_pass(item1, item2, cnt_same); + if (!pass) + return false; + } + } + /* Inner layer */ + for (item1 = inner_item1; item1->type != RTE_FLOW_ITEM_TYPE_END; item1++) { + for (item2 = inner_item2; item2->type != RTE_FLOW_ITEM_TYPE_END; + item2++) { + pass = is_item_check_pass(item1, item2, cnt_same); + if (!pass) + return false; + } + } + } else { + for (item1 = items1; item1 != inner_item1; item1++) { + for (item2 = items2; item2->type != RTE_FLOW_ITEM_TYPE_END; + item2++) { + pass = is_item_check_pass(item1, item2, cnt_same); + if (!pass) + return false; + } + } + } + } else { + if (is_tun_flow_2) { + for (item1 = items1; item1->type != RTE_FLOW_ITEM_TYPE_END; item1++) { + for (item2 = items2; item2 != inner_item2; item2++) { + pass = is_item_check_pass(item1, item2, cnt_same); + if (!pass) + return false; + } + } + } else { + for (item1 = items1; item1->type != RTE_FLOW_ITEM_TYPE_END; item1++) { + for (item2 = items2; item2->type != RTE_FLOW_ITEM_TYPE_END; + item2++) { + pass = is_item_check_pass(item1, item2, cnt_same); + if (!pass) + return false; + } + } + } + } + + return true; +} + +static inline bool +is_action_pattern_check_pass(struct rte_flow_item *items, + enum rte_flow_item_type type) +{ + struct rte_flow_item *item; + + for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->type == type) + return false; + } + + return true; +} + +static bool +nfp_ct_merge_action_check(struct rte_flow_action *action, + struct rte_flow_item *items) +{ + bool pass = true; + + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: + pass = is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_ETH); + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: + pass = is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_IPV4); + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: + pass = is_action_pattern_check_pass(items, 
RTE_FLOW_ITEM_TYPE_IPV6); + break; + case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_TP_DST: + pass = is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_UDP); + pass |= is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_TCP); + pass |= is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_SCTP); + break; + default: + break; + } + + return pass; +} + +static bool +nfp_ct_merge_actions_check(struct rte_flow_action *actions, + struct rte_flow_item *items, + uint8_t *cnt_same) +{ + bool pass = true; + struct rte_flow_action *action; + + for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) { + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_SET_TP_DST: + pass = nfp_ct_merge_action_check(action, items); + break; + case RTE_FLOW_ACTION_TYPE_CONNTRACK: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_JUMP: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_COUNT: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_DROP: /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_VOID: + *cnt_same = *cnt_same + 1; + break; + default: + pass = false; + break; + } + } + + return pass; +} + +static void +nfp_ct_merge_item_real(const struct rte_flow_item *item_src, + struct rte_flow_item *item_dst) +{ + uint32_t i; + char *key_dst; + char *mask_dst; + size_t size = 0; + const char *key_src; + const char *mask_src; + + key_src = item_src->spec; + mask_src = item_src->mask; + key_dst = (char *)(ptrdiff_t)item_dst->spec; + mask_dst = (char *)(ptrdiff_t)item_dst->mask; + nfp_flow_item_conf_size_get(item_src->type, &size); + + for (i = 0; i < size; i++) { + key_dst[i] |= key_src[i]; + mask_dst[i] |= mask_src[i]; + } +} + +static bool +nfp_ct_merge_item(uint32_t index, + const struct rte_flow_item *item1, + const struct rte_flow_item *item2_start, + const struct rte_flow_item *item2_end, + struct nfp_ct_merge_entry *merge_entry) +{ + struct rte_flow_item *item; + const struct rte_flow_item *item2; + + /* Copy to the merged items */ + item = &merge_entry->rule.items[index]; + *item = *item1; + + item2 = item2_start; + if (item2_end != NULL) { + for (; item2 != item2_end; item2++) { + if (item1->type == item2->type) { + nfp_ct_merge_item_real(item2, item); + return true; + } + } + } else { + for (; item2->type != RTE_FLOW_ITEM_TYPE_END; item2++) { + if (item1->type == item2->type) { + nfp_ct_merge_item_real(item2, item); + return true; + } + } + } + + return false; +} + +static void +nfp_ct_merge_items(struct nfp_ct_merge_entry *merge_entry) +{ + uint32_t index = 0; + bool is_tun_flow_1; + bool is_tun_flow_2; + struct rte_flow_item *items1; + struct rte_flow_item *items2; + struct rte_flow_item *merge_item; + const struct rte_flow_item *item; + const struct rte_flow_item *inner1 = NULL; + const struct rte_flow_item *inner2 = NULL; + + items1 = merge_entry->pre_ct_parent->rule.items; + items2 = merge_entry->post_ct_parent->rule.items; + is_tun_flow_1 = nfp_flow_inner_item_get(items1, &inner1); + is_tun_flow_2 = 
nfp_flow_inner_item_get(items2, &inner2); + + if (is_tun_flow_1) { + if (is_tun_flow_2) { + /* Outer layer */ + for (item = items1; item != inner1; item++, index++) { + if (nfp_ct_merge_item(index, item, items2, inner2, merge_entry)) + items2++; + } + + /* Copy the remainning outer layer items */ + for (item = items2; item != inner2; item++, index++) { + merge_item = &merge_entry->rule.items[index]; + *merge_item = *item; + } + + /* Inner layer */ + for (item = inner1; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) { + if (nfp_ct_merge_item(index, item, inner2, NULL, merge_entry)) + items2++; + } + + /* Copy the remainning inner layer items */ + for (item = items2; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) { + merge_item = &merge_entry->rule.items[index]; + *merge_item = *item; + } + } else { + for (item = items1; item != inner1; item++, index++) { + if (nfp_ct_merge_item(index, item, items2, NULL, merge_entry)) + items2++; + } + + /* Copy the remainning items */ + for (item = items2; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) { + merge_item = &merge_entry->rule.items[index]; + *merge_item = *item; + } + + /* Copy the inner layer items */ + for (item = inner1; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) { + merge_item = &merge_entry->rule.items[index]; + *merge_item = *item; + } + } + } else { + if (is_tun_flow_2) { + for (item = items1; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) { + if (nfp_ct_merge_item(index, item, items2, inner2, merge_entry)) + items2++; + } + + /* Copy the remainning items */ + for (item = items2; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) { + merge_item = &merge_entry->rule.items[index]; + *merge_item = *item; + } + } else { + for (item = items1; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) { + if (nfp_ct_merge_item(index, item, items2, NULL, merge_entry)) + items2++; + } + + /* Copy the remainning items */ + for (item = items2; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) { + merge_item = &merge_entry->rule.items[index]; + *merge_item = *item; + } + } + } +} + +static void +nfp_ct_merge_actions(struct nfp_ct_merge_entry *merge_entry) +{ + struct rte_flow_action *action; + struct rte_flow_action *merge_actions; + + merge_actions = merge_entry->rule.actions; + + action = merge_entry->pre_ct_parent->rule.actions; + for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) { + if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK || + action->type == RTE_FLOW_ACTION_TYPE_JUMP) + continue; + + *merge_actions = *action; + merge_actions++; + } + + action = merge_entry->post_ct_parent->rule.actions; + for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) { + *merge_actions = *action; + merge_actions++; + } +} + +static bool +nfp_ct_do_flow_merge(struct nfp_ct_zone_entry *ze, + struct nfp_ct_flow_entry *pre_ct_entry, + struct nfp_ct_flow_entry *post_ct_entry) +{ + bool ret; + uint64_t new_cookie[2]; + uint8_t cnt_same_item = 0; + uint8_t cnt_same_action = 0; + struct nfp_ct_merge_entry *merge_entry; + + if (pre_ct_entry->repr != post_ct_entry->repr) + return true; + + ret = nfp_ct_merge_items_check(pre_ct_entry->rule.items, + post_ct_entry->rule.items, &cnt_same_item); + if (!ret) + return true; + + ret = nfp_ct_merge_actions_check(pre_ct_entry->rule.actions, + post_ct_entry->rule.items, &cnt_same_action); + if (!ret) + return true; + + new_cookie[0] = pre_ct_entry->cookie; + new_cookie[1] = post_ct_entry->cookie; + merge_entry = nfp_ct_merge_table_search(ze, (char *)&new_cookie, 
sizeof(uint64_t) * 2); + if (merge_entry != NULL) + return true; + + merge_entry = rte_zmalloc("ct_merge_entry", sizeof(*merge_entry), 0); + if (merge_entry == NULL) { + PMD_DRV_LOG(ERR, "Malloc memory for ct merge entry failed"); + return false; + } + + merge_entry->ze = ze; + merge_entry->pre_ct_parent = pre_ct_entry; + merge_entry->post_ct_parent = post_ct_entry; + rte_memcpy(merge_entry->cookie, new_cookie, sizeof(new_cookie)); + merge_entry->rule.items_cnt = pre_ct_entry->rule.items_cnt + + post_ct_entry->rule.items_cnt - cnt_same_item - 1; + merge_entry->rule.actions_cnt = pre_ct_entry->rule.actions_cnt + + post_ct_entry->rule.actions_cnt - cnt_same_action - 1; + + merge_entry->rule.items = rte_zmalloc("ct_flow_item", + sizeof(struct rte_flow_item) * merge_entry->rule.items_cnt, 0); + if (merge_entry->rule.items == NULL) { + PMD_DRV_LOG(ERR, "Could not alloc items for merged flow"); + goto merge_exit; + } + + merge_entry->rule.actions = rte_zmalloc("ct_flow_action", + sizeof(struct rte_flow_action) * merge_entry->rule.actions_cnt, 0); + if (merge_entry->rule.actions == NULL) { + PMD_DRV_LOG(ERR, "Could not alloc actions for merged flow"); + goto free_items; + } + + nfp_ct_merge_items(merge_entry); + nfp_ct_merge_actions(merge_entry); + + /* Add this entry to the pre_ct and post_ct lists */ + LIST_INSERT_HEAD(&pre_ct_entry->children, merge_entry, pre_ct_list); + LIST_INSERT_HEAD(&post_ct_entry->children, merge_entry, post_ct_list); + + ret = nfp_ct_merge_table_add(ze, merge_entry); + if (!ret) { + PMD_DRV_LOG(ERR, "Add into ct merge table failed"); + goto free_actions; + } + + /* Send to firmware */ + ret = nfp_ct_offload_add(pre_ct_entry->repr, merge_entry); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Send the merged flow to firmware failed"); + goto merge_table_del; + } + + return true; + +merge_table_del: + nfp_ct_merge_table_delete(ze, merge_entry); +free_actions: + rte_free(merge_entry->rule.actions); +free_items: + rte_free(merge_entry->rule.items); +merge_exit: + LIST_REMOVE(merge_entry, post_ct_list); + LIST_REMOVE(merge_entry, pre_ct_list); + rte_free(merge_entry); + + return ret; +} + +static bool +nfp_ct_merge_flow_entries(struct nfp_ct_flow_entry *fe, + struct nfp_ct_zone_entry *ze_src, + struct nfp_ct_zone_entry *ze_dst) +{ + bool ret; + struct nfp_ct_flow_entry *fe_tmp; + + if (fe->type == CT_TYPE_PRE_CT) { + LIST_FOREACH(fe_tmp, &ze_src->post_ct_list, post_ct_list) { + ret = nfp_ct_do_flow_merge(ze_dst, fe, fe_tmp); + if (!ret) { + PMD_DRV_LOG(ERR, "Merge for ct pre flow failed"); + return false; + } + } + } else { + LIST_FOREACH(fe_tmp, &ze_src->pre_ct_list, pre_ct_list) { + ret = nfp_ct_do_flow_merge(ze_dst, fe_tmp, fe); + if (!ret) { + PMD_DRV_LOG(ERR, "Merge for ct post flow failed"); + return false; + } + } + } + + return true; +} + +static bool +nfp_flow_handle_pre_ct(const struct rte_flow_item *ct_item, + struct nfp_flower_representor *representor, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + uint64_t cookie) +{ + bool ret; + struct nfp_flow_priv *priv; + struct nfp_ct_zone_entry *ze; + struct nfp_ct_flow_entry *fe; + const struct ct_data *ct = ct_item->spec; + + priv = representor->app_fw_flower->flow_priv; + ze = nfp_ct_zone_entry_get(priv, ct->ct_zone, false); + if (ze == NULL) { + PMD_DRV_LOG(ERR, "Could not get ct zone entry"); + return false; + } + + /* Add entry to pre_ct_list */ + fe = nfp_ct_flow_entry_get(ze, representor, items, actions, cookie); + if (fe == NULL) { + PMD_DRV_LOG(ERR, "Could not get ct flow entry"); + goto 
ct_zone_entry_free; + } + + fe->type = CT_TYPE_PRE_CT; + LIST_INSERT_HEAD(&ze->pre_ct_list, fe, pre_ct_list); + + ret = nfp_ct_merge_flow_entries(fe, ze, ze); + if (!ret) { + PMD_DRV_LOG(ERR, "Merge ct flow entries failed"); + goto ct_flow_entry_free; + } + + /* Need to check and merge with tables in the wc_zone as well */ + if (priv->ct_zone_wc != NULL) { + ret = nfp_ct_merge_flow_entries(fe, priv->ct_zone_wc, ze); + if (!ret) { + PMD_DRV_LOG(ERR, "Merge ct flow entries wildcast failed"); + goto ct_flow_entry_free; + } + } + + return true; + +ct_flow_entry_free: + nfp_ct_flow_entry_destroy(fe); + +ct_zone_entry_free: + nfp_ct_zone_entry_free(ze, false); + + return false; +} + +static bool +nfp_flow_handle_post_ct(const struct rte_flow_item *ct_item, + struct nfp_flower_representor *representor, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + uint64_t cookie) +{ + bool ret; + void *next_data; + uint32_t iter = 0; + const void *next_key; + bool wildcard = false; + struct nfp_flow_priv *priv; + struct nfp_ct_zone_entry *ze; + struct nfp_ct_flow_entry *fe; + const struct ct_data *ct = ct_item->spec; + const struct ct_data *ct_mask = ct_item->mask; + + if (ct_mask->ct_zone == 0) { + wildcard = true; + } else if (ct_mask->ct_zone != UINT16_MAX) { + PMD_DRV_LOG(ERR, "Partially wildcard ct_zone is not supported"); + return false; + } + + priv = representor->app_fw_flower->flow_priv; + ze = nfp_ct_zone_entry_get(priv, ct->ct_zone, wildcard); + if (ze == NULL) { + PMD_DRV_LOG(ERR, "Could not get ct zone entry"); + return false; + } + + /* Add entry to post_ct_list */ + fe = nfp_ct_flow_entry_get(ze, representor, items, actions, cookie); + if (fe == NULL) { + PMD_DRV_LOG(ERR, "Could not get ct flow entry"); + goto ct_zone_entry_free; + } + + fe->type = CT_TYPE_POST_CT; + LIST_INSERT_HEAD(&ze->post_ct_list, fe, post_ct_list); + + if (wildcard) { + while (rte_hash_iterate(priv->ct_zone_table, &next_key, &next_data, &iter) >= 0) { + ze = (struct nfp_ct_zone_entry *)next_data; + ret = nfp_ct_merge_flow_entries(fe, ze, ze); + if (!ret) { + PMD_DRV_LOG(ERR, "Merge ct flow entries wildcast failed"); + break; + } + } + } else { + ret = nfp_ct_merge_flow_entries(fe, ze, ze); + } + + if (!ret) + goto ct_flow_entry_free; + + return true; + +ct_flow_entry_free: + nfp_ct_flow_entry_destroy(fe); + +ct_zone_entry_free: + nfp_ct_zone_entry_free(ze, wildcard); + + return false; +} + +struct rte_flow * +nfp_ct_flow_setup(struct nfp_flower_representor *representor, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + const struct rte_flow_item *ct_item, + bool validate_flag, + uint64_t cookie) +{ + const struct ct_data *ct; + + if (ct_item == NULL) + return NULL; + + ct = ct_item->spec; + + if (is_ct_commit_flow(ct)) { + return nfp_flow_process(representor, &items[1], actions, + validate_flag, cookie, false, false); + } + + if (is_post_ct_flow(ct)) { + if (nfp_flow_handle_post_ct(ct_item, representor, &items[1], + actions, cookie)) { + return nfp_flow_process(representor, &items[1], actions, + validate_flag, cookie, false, false); + } + + PMD_DRV_LOG(ERR, "Handle nfp post ct flow failed."); + return NULL; + } + + if (is_pre_ct_flow(ct, actions)) { + if (nfp_flow_handle_pre_ct(ct_item, representor, &items[1], + actions, cookie)) { + return nfp_flow_process(representor, &items[1], actions, + validate_flag, cookie, false, false); + } + + PMD_DRV_LOG(ERR, "Handle nfp pre ct flow failed."); + return NULL; + } + + PMD_DRV_LOG(ERR, "Unsupported ct flow type."); + 
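/*
 * Reached only when the conntrack spec matches none of the cases handled
 * above: a NEW (commit) flow offloaded directly, an ESTABLISHED (post-ct)
 * flow merged against the stored pre-ct rules, or a pre-ct flow carrying a
 * CONNTRACK action that is stored for later merging.
 */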
return NULL; +} + +static inline void +nfp_ct_flow_stats_update(struct nfp_flow_priv *priv, + struct nfp_ct_merge_entry *m_ent) +{ + uint32_t ctx_id; + struct nfp_fl_stats *merge_stats; + + ctx_id = m_ent->ctx_id; + merge_stats = &priv->stats[ctx_id]; + + m_ent->pre_ct_parent->stats.bytes += merge_stats->bytes; + m_ent->pre_ct_parent->stats.pkts += merge_stats->pkts; + m_ent->post_ct_parent->stats.bytes += merge_stats->bytes; + m_ent->post_ct_parent->stats.pkts += merge_stats->pkts; + + merge_stats->bytes = 0; + merge_stats->pkts = 0; +} + +struct nfp_fl_stats * +nfp_ct_flow_stats_get(struct nfp_flow_priv *priv, + struct nfp_ct_map_entry *me) +{ + struct nfp_ct_merge_entry *m_ent; + + rte_spinlock_lock(&priv->stats_lock); + + if (me->fe->type == CT_TYPE_PRE_CT) { + LIST_FOREACH(m_ent, &me->fe->children, pre_ct_list) + nfp_ct_flow_stats_update(priv, m_ent); + } else { + LIST_FOREACH(m_ent, &me->fe->children, post_ct_list) + nfp_ct_flow_stats_update(priv, m_ent); + } + + rte_spinlock_unlock(&priv->stats_lock); + + return &me->fe->stats; +} diff --git a/drivers/net/nfp/flower/nfp_conntrack.h b/drivers/net/nfp/flower/nfp_conntrack.h new file mode 100644 index 00000000000..5abab4e984e --- /dev/null +++ b/drivers/net/nfp/flower/nfp_conntrack.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Corigine, Inc. + * All rights reserved. + */ + +#ifndef __NFP_CONNTRACK_H__ +#define __NFP_CONNTRACK_H__ + +#include + +#include +#include + +#include "../nfp_flow.h" + +struct nfp_ct_map_entry; + +struct nfp_ct_zone_entry; + +struct nfp_ct_merge_entry; + +struct nfp_ct_map_entry *nfp_ct_map_table_search(struct nfp_flow_priv *priv, + char *hash_data, + uint32_t hash_len); + +int nfp_ct_offload_del(struct rte_eth_dev *dev, + struct nfp_ct_map_entry *me, + struct rte_flow_error *error); + +struct rte_flow *nfp_ct_flow_setup(struct nfp_flower_representor *representor, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + const struct rte_flow_item *ct_item, + bool validate_flag, + uint64_t cookie); + +struct nfp_fl_stats *nfp_ct_flow_stats_get(struct nfp_flow_priv *priv, + struct nfp_ct_map_entry *me); + +#endif /* __NFP_CONNTRACK_H__ */ diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c index 77dab864f31..0c54d7ed272 100644 --- a/drivers/net/nfp/flower/nfp_flower.c +++ b/drivers/net/nfp/flower/nfp_flower.c @@ -3,36 +3,30 @@ * All rights reserved. 
*/ -#include -#include -#include +#include "nfp_flower.h" + #include -#include -#include +#include +#include -#include "../nfp_common.h" -#include "../nfp_logs.h" -#include "../nfp_ctrl.h" -#include "../nfp_cpp_bridge.h" -#include "../nfp_rxtx.h" #include "../nfd3/nfp_nfd3.h" #include "../nfdk/nfp_nfdk.h" -#include "../nfpcore/nfp_mip.h" -#include "../nfpcore/nfp_rtsym.h" #include "../nfpcore/nfp_nsp.h" -#include "nfp_flower.h" +#include "../nfpcore/nfp_rtsym.h" +#include "../nfp_cpp_bridge.h" +#include "../nfp_logs.h" +#include "../nfp_mtr.h" #include "nfp_flower_ctrl.h" #include "nfp_flower_representor.h" -#include "nfp_flower_cmsg.h" #define CTRL_VNIC_NB_DESC 512 static void nfp_pf_repr_enable_queues(struct rte_eth_dev *dev) { + uint16_t i; struct nfp_net_hw *hw; uint64_t enabled_queues = 0; - int i; struct nfp_flower_representor *repr; repr = dev->data->dev_private; @@ -56,9 +50,9 @@ nfp_pf_repr_enable_queues(struct rte_eth_dev *dev) static void nfp_pf_repr_disable_queues(struct rte_eth_dev *dev) { - struct nfp_net_hw *hw; + uint32_t update; uint32_t new_ctrl; - uint32_t update = 0; + struct nfp_net_hw *hw; struct nfp_flower_representor *repr; repr = dev->data->dev_private; @@ -69,13 +63,13 @@ nfp_pf_repr_disable_queues(struct rte_eth_dev *dev) new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE; update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING | - NFP_NET_CFG_UPDATE_MSIX; + NFP_NET_CFG_UPDATE_MSIX; if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG) new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG; /* If an error when reconfig we avoid to change hw state */ - if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + if (nfp_net_reconfig(hw, new_ctrl, update) != 0) return; hw->ctrl = new_ctrl; @@ -85,6 +79,7 @@ int nfp_flower_pf_start(struct rte_eth_dev *dev) { int ret; + uint16_t i; uint32_t new_ctrl; uint32_t update = 0; struct nfp_net_hw *hw; @@ -106,7 +101,7 @@ nfp_flower_pf_start(struct rte_eth_dev *dev) update |= NFP_NET_CFG_UPDATE_RSS; - if (hw->cap & NFP_NET_CFG_CTRL_RSS2) + if ((hw->cap & NFP_NET_CFG_CTRL_RSS2) != 0) new_ctrl |= NFP_NET_CFG_CTRL_RSS2; else new_ctrl |= NFP_NET_CFG_CTRL_RSS; @@ -116,7 +111,7 @@ nfp_flower_pf_start(struct rte_eth_dev *dev) update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; - if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG) + if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0) new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl); @@ -137,6 +132,11 @@ nfp_flower_pf_start(struct rte_eth_dev *dev) return -EIO; } + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; } @@ -159,11 +159,13 @@ nfp_flower_pf_stop(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { this_tx_q = dev->data->tx_queues[i]; nfp_net_reset_tx_queue(this_tx_q); + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } for (i = 0; i < dev->data->nb_rx_queues; i++) { this_rx_q = dev->data->rx_queues[i]; nfp_net_reset_rx_queue(this_rx_q); + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } if (rte_eal_process_type() == RTE_PROC_PRIMARY) @@ -214,7 +216,7 @@ nfp_flower_pf_close(struct rte_eth_dev *dev) nfp_net_reset_rx_queue(this_rx_q); } - /* Cancel possible impending LSC work here before releasing the port*/ + /* Cancel possible impending LSC work here before releasing the port */ rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); nn_cfg_writeb(hw, 
NFP_NET_CFG_LSC, 0xff); @@ -224,7 +226,7 @@ nfp_flower_pf_close(struct rte_eth_dev *dev) /* Now it is safe to free all PF resources */ PMD_DRV_LOG(INFO, "Freeing PF resources"); nfp_cpp_area_free(pf_dev->ctrl_area); - nfp_cpp_area_free(pf_dev->hwqueues_area); + nfp_cpp_area_free(pf_dev->qc_area); free(pf_dev->hwinfo); free(pf_dev->sym_tbl); nfp_cpp_free(pf_dev->cpp); @@ -336,7 +338,8 @@ nfp_flower_pf_xmit_pkts(void *tx_queue, } static int -nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type) +nfp_flower_init_vnic_common(struct nfp_net_hw *hw, + const char *vnic_type) { int err; uint32_t start_q; @@ -360,8 +363,8 @@ nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type) start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ; - hw->tx_bar = pf_dev->hw_queues + tx_bar_off; - hw->rx_bar = pf_dev->hw_queues + rx_bar_off; + hw->tx_bar = pf_dev->qc_bar + tx_bar_off; + hw->rx_bar = pf_dev->qc_bar + rx_bar_off; /* Set the current MTU to the maximum supported */ hw->mtu = hw->max_mtu; @@ -385,6 +388,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) int ret = 0; uint16_t n_txq; uint16_t n_rxq; + const char *pci_name; unsigned int numa_node; struct rte_mempool *mp; struct nfp_net_rxq *rxq; @@ -393,6 +397,8 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) struct rte_eth_dev *eth_dev; const struct rte_memzone *tz; struct nfp_app_fw_flower *app_fw_flower; + char ctrl_rxring_name[RTE_MEMZONE_NAMESIZE]; + char ctrl_txring_name[RTE_MEMZONE_NAMESIZE]; char ctrl_pktmbuf_pool_name[RTE_MEMZONE_NAMESIZE]; /* Set up some pointers here for ease of use */ @@ -425,10 +431,12 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) goto eth_dev_cleanup; } + pci_name = strchr(pf_dev->pci_dev->name, ':') + 1; + /* Create a mbuf pool for the ctrl vNIC */ numa_node = rte_socket_id(); snprintf(ctrl_pktmbuf_pool_name, sizeof(ctrl_pktmbuf_pool_name), - "%s_ctrlmp", (strchr(pf_dev->pci_dev->name, ':') + 1)); + "%s_ctrlmp", pci_name); app_fw_flower->ctrl_pktmbuf_pool = rte_pktmbuf_pool_create(ctrl_pktmbuf_pool_name, 4 * CTRL_VNIC_NB_DESC, 64, 0, 9216, numa_node); @@ -467,6 +475,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) eth_dev->data->nb_rx_queues = n_txq; eth_dev->data->dev_private = hw; + snprintf(ctrl_rxring_name, sizeof(ctrl_rxring_name), "%s_ctrx_ring", pci_name); /* Set up the Rx queues */ for (i = 0; i < n_rxq; i++) { rxq = rte_zmalloc_socket("ethdev RX queue", @@ -487,7 +496,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) /* * Tracking mbuf size for detecting a potential mbuf overflow due to - * RX offset + * RX offset. */ rxq->mem_pool = mp; rxq->mbuf_size = rxq->mem_pool->elt_size; @@ -502,8 +511,9 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) * handle the maximum ring size is allocated in order to allow for * resizing in later calls to the queue setup function. 
*/ - tz = rte_eth_dma_zone_reserve(eth_dev, "ctrl_rx_ring", i, - sizeof(struct nfp_net_rx_desc) * NFP_NET_MAX_RX_DESC, + tz = rte_eth_dma_zone_reserve(eth_dev, ctrl_rxring_name, i, + sizeof(struct nfp_net_rx_desc) * + hw->dev_info->max_qc_size, NFP_MEMZONE_ALIGN, numa_node); if (tz == NULL) { PMD_DRV_LOG(ERR, "Error allocating rx dma"); @@ -521,7 +531,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) sizeof(*rxq->rxbufs) * CTRL_VNIC_NB_DESC, RTE_CACHE_LINE_SIZE, numa_node); if (rxq->rxbufs == NULL) { - rte_eth_dma_zone_free(eth_dev, "ctrl_rx_ring", i); + rte_eth_dma_zone_free(eth_dev, ctrl_rxring_name, i); rte_free(rxq); ret = -ENOMEM; goto rx_queue_setup_cleanup; @@ -533,12 +543,13 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) /* * Telling the HW about the physical address of the RX ring and number - * of descriptors in log2 format + * of descriptors in log2 format. */ nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(i), rxq->dma); nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC)); } + snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name); /* Set up the Tx queues */ for (i = 0; i < n_txq; i++) { txq = rte_zmalloc_socket("ethdev TX queue", @@ -557,8 +568,9 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) * handle the maximum ring size is allocated in order to allow for * resizing in later calls to the queue setup function. */ - tz = rte_eth_dma_zone_reserve(eth_dev, "ctrl_tx_ring", i, - sizeof(struct nfp_net_nfd3_tx_desc) * NFP_NET_MAX_TX_DESC, + tz = rte_eth_dma_zone_reserve(eth_dev, ctrl_txring_name, i, + sizeof(struct nfp_net_nfd3_tx_desc) * + hw->dev_info->max_qc_size, NFP_MEMZONE_ALIGN, numa_node); if (tz == NULL) { PMD_DRV_LOG(ERR, "Error allocating tx dma"); @@ -584,7 +596,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) sizeof(*txq->txbufs) * CTRL_VNIC_NB_DESC, RTE_CACHE_LINE_SIZE, numa_node); if (txq->txbufs == NULL) { - rte_eth_dma_zone_free(eth_dev, "ctrl_tx_ring", i); + rte_eth_dma_zone_free(eth_dev, ctrl_txring_name, i); rte_free(txq); ret = -ENOMEM; goto tx_queue_setup_cleanup; @@ -596,7 +608,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) /* * Telling the HW about the physical address of the TX ring and number - * of descriptors in log2 format + * of descriptors in log2 format. 
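 * (With CTRL_VNIC_NB_DESC = 512, rte_log2_u32(512) = 9 is the value
 *  written to the ring-size register here.)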
*/ nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(i), txq->dma); nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC)); @@ -609,7 +621,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) txq = eth_dev->data->tx_queues[i]; if (txq != NULL) { rte_free(txq->txbufs); - rte_eth_dma_zone_free(eth_dev, "ctrl_tx_ring", i); + rte_eth_dma_zone_free(eth_dev, ctrl_txring_name, i); rte_free(txq); } } @@ -618,7 +630,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) rxq = eth_dev->data->rx_queues[i]; if (rxq != NULL) { rte_free(rxq->rxbufs); - rte_eth_dma_zone_free(eth_dev, "ctrl_rx_ring", i); + rte_eth_dma_zone_free(eth_dev, ctrl_rxring_name, i); rte_free(rxq); } } @@ -639,28 +651,35 @@ static void nfp_flower_cleanup_ctrl_vnic(struct nfp_net_hw *hw) { uint32_t i; + const char *pci_name; struct nfp_net_rxq *rxq; struct nfp_net_txq *txq; struct rte_eth_dev *eth_dev; struct nfp_app_fw_flower *app_fw_flower; + char ctrl_txring_name[RTE_MEMZONE_NAMESIZE]; + char ctrl_rxring_name[RTE_MEMZONE_NAMESIZE]; eth_dev = hw->eth_dev; app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(hw->pf_dev->app_fw_priv); + pci_name = strchr(app_fw_flower->pf_hw->pf_dev->pci_dev->name, ':') + 1; + + snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name); for (i = 0; i < hw->max_tx_queues; i++) { txq = eth_dev->data->tx_queues[i]; if (txq != NULL) { rte_free(txq->txbufs); - rte_eth_dma_zone_free(eth_dev, "ctrl_tx_ring", i); + rte_eth_dma_zone_free(eth_dev, ctrl_txring_name, i); rte_free(txq); } } + snprintf(ctrl_rxring_name, sizeof(ctrl_rxring_name), "%s_ctrx_ring", pci_name); for (i = 0; i < hw->max_rx_queues; i++) { rxq = eth_dev->data->rx_queues[i]; if (rxq != NULL) { rte_free(rxq->rxbufs); - rte_eth_dma_zone_free(eth_dev, "ctrl_rx_ring", i); + rte_eth_dma_zone_free(eth_dev, ctrl_rxring_name, i); rte_free(rxq); } } @@ -747,7 +766,7 @@ nfp_flower_enable_services(struct nfp_app_fw_flower *app_fw_flower) app_fw_flower->ctrl_vnic_id = service_id; PMD_INIT_LOG(INFO, "%s registered", flower_service.name); - /* Map them to available service cores*/ + /* Map them to available service cores */ ret = nfp_map_service(service_id); if (ret != 0) { PMD_INIT_LOG(ERR, "Could not map %s", flower_service.name); @@ -789,7 +808,8 @@ nfp_flower_nfd_func_register(struct nfp_app_fw_flower *app_fw_flower) } int -nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev) +nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev, + const struct nfp_dev_info *dev_info) { int ret; int err; @@ -858,6 +878,7 @@ nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev) pf_hw->ctrl_bar = pf_dev->ctrl_bar; pf_hw->pf_dev = pf_dev; pf_hw->cpp = pf_dev->cpp; + pf_hw->dev_info = dev_info; ret = nfp_flower_init_vnic_common(app_fw_flower->pf_hw, "pf_vnic"); if (ret != 0) { @@ -883,6 +904,7 @@ nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev) /* Now populate the ctrl vNIC */ ctrl_hw->pf_dev = pf_dev; ctrl_hw->cpp = pf_dev->cpp; + ctrl_hw->dev_info = dev_info; ret = nfp_flower_init_ctrl_vnic(app_fw_flower->ctrl_hw); if (ret != 0) { diff --git a/drivers/net/nfp/flower/nfp_flower.h b/drivers/net/nfp/flower/nfp_flower.h index 34092cd4b41..b7ea830209d 100644 --- a/drivers/net/nfp/flower/nfp_flower.h +++ b/drivers/net/nfp/flower/nfp_flower.h @@ -3,8 +3,8 @@ * All rights reserved. 
*/ -#ifndef _NFP_FLOWER_H_ -#define _NFP_FLOWER_H_ +#ifndef __NFP_FLOWER_H__ +#define __NFP_FLOWER_H__ #include "../nfp_common.h" @@ -34,7 +34,9 @@ #define MAX_FLOWER_PHYPORTS 8 #define MAX_FLOWER_VFS 64 +/* Forward declaration */ struct nfp_app_fw_flower; +struct nfp_flower_representor; /* The function pointers for different NFD version */ struct nfp_flower_nfd_func { @@ -51,49 +53,49 @@ struct nfp_flower_nfd_func { /* The flower application's private structure */ struct nfp_app_fw_flower { - /* switch domain for this app */ + /** Switch domain for this app */ uint16_t switch_domain_id; - /* Number of VF representors */ + /** Number of VF representors */ uint8_t num_vf_reprs; - /* Number of phyport representors */ + /** Number of phyport representors */ uint8_t num_phyport_reprs; - /* Pointer to the PF vNIC */ + /** Pointer to the PF vNIC */ struct nfp_net_hw *pf_hw; - /* Pointer to a mempool for the ctrlvNIC */ + /** Pointer to a mempool for the Ctrl vNIC */ struct rte_mempool *ctrl_pktmbuf_pool; - /* Pointer to the ctrl vNIC */ + /** Pointer to the ctrl vNIC */ struct nfp_net_hw *ctrl_hw; - /* Ctrl vNIC Rx counter */ + /** Ctrl vNIC Rx counter */ uint64_t ctrl_vnic_rx_count; - /* Ctrl vNIC Tx counter */ + /** Ctrl vNIC Tx counter */ uint64_t ctrl_vnic_tx_count; - /* Array of phyport representors */ + /** Array of phyport representors */ struct nfp_flower_representor *phy_reprs[MAX_FLOWER_PHYPORTS]; - /* Array of VF representors */ + /** Array of VF representors */ struct nfp_flower_representor *vf_reprs[MAX_FLOWER_VFS]; - /* PF representor */ + /** PF representor */ struct nfp_flower_representor *pf_repr; - /* service id of ctrl vnic service */ + /** Service id of Ctrl vNIC service */ uint32_t ctrl_vnic_id; - /* Flower extra features */ + /** Flower extra features */ uint64_t ext_features; struct nfp_flow_priv *flow_priv; struct nfp_mtr_priv *mtr_priv; - /* Function pointers for different NFD version */ + /** Function pointers for different NFD version */ struct nfp_flower_nfd_func nfd_func; }; @@ -103,7 +105,8 @@ nfp_flower_support_decap_v2(const struct nfp_app_fw_flower *app_fw_flower) return app_fw_flower->ext_features & NFP_FL_FEATS_DECAP_V2; } -int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev); +int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev, + const struct nfp_dev_info *dev_info); int nfp_secondary_init_app_fw_flower(struct nfp_cpp *cpp); bool nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw, struct rte_mbuf *mbuf, @@ -115,4 +118,4 @@ int nfp_flower_pf_stop(struct rte_eth_dev *dev); uint32_t nfp_flower_pkt_add_metadata(struct nfp_app_fw_flower *app_fw_flower, struct rte_mbuf *mbuf, uint32_t port_id); -#endif /* _NFP_FLOWER_H_ */ +#endif /* __NFP_FLOWER_H__ */ diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.c b/drivers/net/nfp/flower/nfp_flower_cmsg.c index 00f94c74920..2ec9498d221 100644 --- a/drivers/net/nfp/flower/nfp_flower_cmsg.c +++ b/drivers/net/nfp/flower/nfp_flower_cmsg.c @@ -3,14 +3,19 @@ * All rights reserved. 
*/ +#include "nfp_flower_cmsg.h" + #include "../nfpcore/nfp_nsp.h" #include "../nfp_logs.h" -#include "../nfp_common.h" -#include "nfp_flower.h" -#include "nfp_flower_cmsg.h" #include "nfp_flower_ctrl.h" #include "nfp_flower_representor.h" +static char* +nfp_flower_cmsg_get_data(struct rte_mbuf *m) +{ + return rte_pktmbuf_mtod(m, char *) + 4 + 4 + NFP_FLOWER_CMSG_HLEN; +} + static void * nfp_flower_cmsg_init(struct nfp_app_fw_flower *app_fw_flower, struct rte_mbuf *m, @@ -59,10 +64,10 @@ nfp_flower_cmsg_mac_repr_init(struct rte_mbuf *mbuf, static void nfp_flower_cmsg_mac_repr_fill(struct rte_mbuf *m, - unsigned int idx, - unsigned int nbi, - unsigned int nbi_port, - unsigned int phys_port) + uint8_t idx, + uint32_t nbi, + uint32_t nbi_port, + uint32_t phys_port) { struct nfp_flower_cmsg_mac_repr *msg; @@ -76,11 +81,11 @@ nfp_flower_cmsg_mac_repr_fill(struct rte_mbuf *m, int nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower) { - int i; + uint8_t i; uint16_t cnt; - unsigned int nbi; - unsigned int nbi_port; - unsigned int phys_port; + uint32_t nbi; + uint32_t nbi_port; + uint32_t phys_port; struct rte_mbuf *mbuf; struct nfp_eth_table *nfp_eth_table; @@ -225,7 +230,7 @@ nfp_flower_cmsg_flow_add(struct nfp_app_fw_flower *app_fw_flower, return -ENOMEM; } - /* copy the flow to mbuf */ + /* Copy the flow to mbuf */ nfp_flow_meta = flow->payload.meta; msg_len = (nfp_flow_meta->key_len + nfp_flow_meta->mask_len + nfp_flow_meta->act_len) << NFP_FL_LW_SIZ; diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.h b/drivers/net/nfp/flower/nfp_flower_cmsg.h index f643d54d39a..c2938fb6f63 100644 --- a/drivers/net/nfp/flower/nfp_flower_cmsg.h +++ b/drivers/net/nfp/flower/nfp_flower_cmsg.h @@ -3,14 +3,11 @@ * All rights reserved. */ -#ifndef _NFP_CMSG_H_ -#define _NFP_CMSG_H_ +#ifndef __NFP_CMSG_H__ +#define __NFP_CMSG_H__ -#include -#include - -#include "../nfp_mtr.h" #include "../nfp_flow.h" +#include "nfp_flower.h" struct nfp_flower_cmsg_hdr { rte_be16_t pad; @@ -351,6 +348,56 @@ struct nfp_flower_stats_frame { rte_be64_t stats_cookie; }; +/* + * See RFC 2698 for more details. 
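 * (RFC 2698 specifies the two-rate three-color marker: packets are metered
 *  against a committed and a peak information rate, each with its own burst
 *  size, which is what the CIR/PIR/CBS/PBS fields below carry.)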
+ * Word[0](Flag options): + * [15] p(pps) 1 for pps, 0 for bps + * + * Meter control message + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-------------------------------+-+---+-----+-+---------+-+---+-+ + * | Reserved |p| Y |TYPE |E| TSHFV |P| PC|R| + * +-------------------------------+-+---+-----+-+---------+-+---+-+ + * | Profile ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Token Bucket Peak | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Token Bucket Committed | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Peak Burst Size | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Committed Burst Size | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Peak Information Rate | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Committed Information Rate | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct nfp_cfg_head { + rte_be32_t flags_opts; + rte_be32_t profile_id; +}; + +/* Profile config, offload to NIC */ +struct nfp_profile_conf { + struct nfp_cfg_head head; /**< Config head information */ + rte_be32_t bkt_tkn_p; /**< Token bucket peak */ + rte_be32_t bkt_tkn_c; /**< Token bucket committed */ + rte_be32_t pbs; /**< Peak burst size */ + rte_be32_t cbs; /**< Committed burst size */ + rte_be32_t pir; /**< Peak information rate */ + rte_be32_t cir; /**< Committed information rate */ +}; + +/* Meter stats, read from firmware */ +struct nfp_mtr_stats_reply { + struct nfp_cfg_head head; /**< Config head information */ + rte_be64_t pass_bytes; /**< Count of passed bytes */ + rte_be64_t pass_pkts; /**< Count of passed packets */ + rte_be64_t drop_bytes; /**< Count of dropped bytes */ + rte_be64_t drop_pkts; /**< Count of dropped packets */ +}; + enum nfp_flower_cmsg_port_type { NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC, NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT, @@ -378,12 +425,6 @@ enum nfp_flower_cmsg_port_vnic_type { #define NFP_FLOWER_CMSG_PORT_PCIE_Q(x) ((x) & 0x3f) /* [0,5] */ #define NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM(x) ((x) & 0xff) /* [0,7] */ -static inline char* -nfp_flower_cmsg_get_data(struct rte_mbuf *m) -{ - return rte_pktmbuf_mtod(m, char *) + 4 + 4 + NFP_FLOWER_CMSG_HLEN; -} - /* * Metadata with L2 (1W/4B) * ---------------------------------------------------------------- @@ -794,7 +835,7 @@ struct nfp_fl_act_set_ipv6_addr { }; /* - * ipv6 tc hl fl + * Ipv6 tc hl fl * 3 2 1 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -897,9 +938,9 @@ struct nfp_fl_act_set_tun { uint8_t tos; rte_be16_t outer_vlan_tpid; rte_be16_t outer_vlan_tci; - uint8_t tun_len; /* Only valid for NFP_FL_TUNNEL_GENEVE */ + uint8_t tun_len; /**< Only valid for NFP_FL_TUNNEL_GENEVE */ uint8_t reserved2; - rte_be16_t tun_proto; /* Only valid for NFP_FL_TUNNEL_GENEVE */ + rte_be16_t tun_proto; /**< Only valid for NFP_FL_TUNNEL_GENEVE */ } __rte_packed; /* @@ -948,4 +989,4 @@ int nfp_flower_cmsg_qos_delete(struct nfp_app_fw_flower *app_fw_flower, int nfp_flower_cmsg_qos_stats(struct nfp_app_fw_flower *app_fw_flower, struct nfp_cfg_head *head); -#endif /* _NFP_CMSG_H_ */ +#endif /* __NFP_CMSG_H__ */ diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.c b/drivers/net/nfp/flower/nfp_flower_ctrl.c index 4cb2c2f99e0..b4be28ccdf2 100644 --- a/drivers/net/nfp/flower/nfp_flower_ctrl.c +++ 
b/drivers/net/nfp/flower/nfp_flower_ctrl.c @@ -3,20 +3,15 @@ * All rights reserved. */ -#include +#include "nfp_flower_ctrl.h" + #include -#include -#include "../nfp_common.h" -#include "../nfp_logs.h" -#include "../nfp_ctrl.h" -#include "../nfp_rxtx.h" #include "../nfd3/nfp_nfd3.h" #include "../nfdk/nfp_nfdk.h" -#include "nfp_flower.h" -#include "nfp_flower_ctrl.h" -#include "nfp_flower_cmsg.h" +#include "../nfp_logs.h" #include "nfp_flower_representor.h" +#include "nfp_mtr.h" #define MAX_PKT_BURST 32 @@ -39,7 +34,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue, if (unlikely(rxq == NULL)) { /* * DPDK just checks the queue is lower than max queues - * enabled. But the queue needs to be configured + * enabled. But the queue needs to be configured. */ PMD_RX_LOG(ERR, "RX Bad queue"); return 0; @@ -65,20 +60,19 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue, /* * We got a packet. Let's alloc a new mbuf for refilling the - * free descriptor ring as soon as possible + * free descriptor ring as soon as possible. */ new_mb = rte_pktmbuf_alloc(rxq->mem_pool); if (unlikely(new_mb == NULL)) { - PMD_RX_LOG(ERR, - "RX mbuf alloc failed port_id=%u queue_id=%hu", - rxq->port_id, rxq->qidx); + PMD_RX_LOG(ERR, "RX mbuf alloc failed port_id=%u queue_id=%hu", + rxq->port_id, rxq->qidx); nfp_net_mbuf_alloc_failed(rxq); break; } /* * Grab the mbuf and refill the descriptor with the - * previously allocated mbuf + * previously allocated mbuf. */ mb = rxb->mbuf; rxb->mbuf = new_mb; @@ -92,23 +86,15 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue, /* * This should not happen and the user has the * responsibility of avoiding it. But we have - * to give some info about the error + * to give some info about the error. */ - PMD_RX_LOG(ERR, - "mbuf overflow likely due to the RX offset.\n" - "\t\tYour mbuf size should have extra space for" - " RX offset=%u bytes.\n" - "\t\tCurrently you just have %u bytes available" - " but the received packet is %u bytes long", - hw->rx_offset, - rxq->mbuf_size - hw->rx_offset, - mb->data_len); + PMD_RX_LOG(ERR, "mbuf overflow likely due to the RX offset."); rte_pktmbuf_free(mb); break; } /* Filling the received mbuf with packet info */ - if (hw->rx_offset) + if (hw->rx_offset != 0) mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset; else mb->data_off = RTE_PKTMBUF_HEADROOM + NFP_DESC_META_LEN(rxds); @@ -130,7 +116,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue, nb_hold++; rxq->rd_p++; - if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/ + if (unlikely(rxq->rd_p == rxq->rx_count)) /* Wrapping */ rxq->rd_p = 0; } @@ -146,7 +132,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue, rte_wmb(); if (nb_hold >= rxq->rx_free_thresh) { PMD_RX_LOG(DEBUG, "port=%hu queue=%hu nb_hold=%hu avail=%hu", - rxq->port_id, rxq->qidx, nb_hold, avail); + rxq->port_id, rxq->qidx, nb_hold, avail); nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold); nb_hold = 0; } @@ -177,7 +163,7 @@ nfp_flower_ctrl_vnic_nfd3_xmit(struct nfp_app_fw_flower *app_fw_flower, if (unlikely(txq == NULL)) { /* * DPDK just checks the queue is lower than max queues - * enabled. But the queue needs to be configured + * enabled. But the queue needs to be configured. 
*/ PMD_TX_LOG(ERR, "ctrl dev TX Bad queue"); goto xmit_end; @@ -200,7 +186,7 @@ nfp_flower_ctrl_vnic_nfd3_xmit(struct nfp_app_fw_flower *app_fw_flower, lmbuf = &txq->txbufs[txq->wr_p].mbuf; RTE_MBUF_PREFETCH_TO_FREE(*lmbuf); - if (*lmbuf) + if (*lmbuf != NULL) rte_pktmbuf_free_seg(*lmbuf); *lmbuf = mbuf; @@ -213,7 +199,7 @@ nfp_flower_ctrl_vnic_nfd3_xmit(struct nfp_app_fw_flower *app_fw_flower, txds->offset_eop = FLOWER_PKT_DATA_OFFSET | NFD3_DESC_TX_EOP; txq->wr_p++; - if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/ + if (unlikely(txq->wr_p == txq->tx_count)) /* Wrapping */ txq->wr_p = 0; cnt++; @@ -342,7 +328,7 @@ nfp_flower_ctrl_vnic_nfdk_xmit(struct nfp_app_fw_flower *app_fw_flower, } txq->wr_p = D_IDX(txq, txq->wr_p + used_descs); - if (txq->wr_p % NFDK_TX_DESC_BLOCK_CNT) + if (txq->wr_p % NFDK_TX_DESC_BLOCK_CNT != 0) txq->data_pending += mbuf->pkt_len; else txq->data_pending = 0; @@ -527,7 +513,7 @@ nfp_flower_ctrl_vnic_poll(struct nfp_app_fw_flower *app_fw_flower) ctrl_hw = app_fw_flower->ctrl_hw; ctrl_eth_dev = ctrl_hw->eth_dev; - /* ctrl vNIC only has a single Rx queue */ + /* Ctrl vNIC only has a single Rx queue */ rxq = ctrl_eth_dev->data->rx_queues[0]; while (rte_service_runstate_get(app_fw_flower->ctrl_vnic_id) != 0) { diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.h b/drivers/net/nfp/flower/nfp_flower_ctrl.h index b7e836cf7ef..4c94d36847d 100644 --- a/drivers/net/nfp/flower/nfp_flower_ctrl.h +++ b/drivers/net/nfp/flower/nfp_flower_ctrl.h @@ -3,12 +3,14 @@ * All rights reserved. */ -#ifndef _NFP_FLOWER_CTRL_H_ -#define _NFP_FLOWER_CTRL_H_ +#ifndef __NFP_FLOWER_CTRL_H__ +#define __NFP_FLOWER_CTRL_H__ + +#include "nfp_flower.h" void nfp_flower_ctrl_vnic_poll(struct nfp_app_fw_flower *app_fw_flower); uint16_t nfp_flower_ctrl_vnic_xmit(struct nfp_app_fw_flower *app_fw_flower, struct rte_mbuf *mbuf); void nfp_flower_ctrl_vnic_xmit_register(struct nfp_app_fw_flower *app_fw_flower); -#endif /* _NFP_FLOWER_CTRL_H_ */ +#endif /* __NFP_FLOWER_CTRL_H__ */ diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c index 5f94d20f1b0..650f09a4759 100644 --- a/drivers/net/nfp/flower/nfp_flower_representor.c +++ b/drivers/net/nfp/flower/nfp_flower_representor.c @@ -3,21 +3,20 @@ * All rights reserved. */ -#include -#include +#include "nfp_flower_representor.h" -#include "../nfp_common.h" -#include "../nfp_logs.h" -#include "../nfp_ctrl.h" -#include "../nfp_rxtx.h" #include "../nfd3/nfp_nfd3.h" -#include "../nfpcore/nfp_mip.h" -#include "../nfpcore/nfp_rtsym.h" #include "../nfpcore/nfp_nsp.h" -#include "nfp_flower.h" -#include "nfp_flower_representor.h" -#include "nfp_flower_ctrl.h" -#include "nfp_flower_cmsg.h" +#include "../nfp_logs.h" +#include "../nfp_mtr.h" + +/* Type of representor */ +enum nfp_repr_type { + NFP_REPR_TYPE_PHYS_PORT, /**< External NIC port */ + NFP_REPR_TYPE_PF, /**< Physical function */ + NFP_REPR_TYPE_VF, /**< Virtual function */ + NFP_REPR_TYPE_MAX, /**< Number of representor types */ +}; static int nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev, @@ -50,7 +49,7 @@ nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev, /* * Tracking mbuf size for detecting a potential mbuf overflow due to - * RX offset + * RX offset. */ rxq->mem_pool = mp; rxq->mbuf_size = rxq->mem_pool->elt_size; @@ -67,7 +66,8 @@ nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev, * resizing in later calls to the queue setup function.
*/ tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, - sizeof(struct nfp_net_rx_desc) * NFP_NET_MAX_RX_DESC, + sizeof(struct nfp_net_rx_desc) * + hw->dev_info->max_qc_size, NFP_MEMZONE_ALIGN, socket_id); if (tz == NULL) { PMD_DRV_LOG(ERR, "Error allocating rx dma"); @@ -80,7 +80,7 @@ nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev, rxq->dma = (uint64_t)tz->iova; rxq->rxds = tz->addr; - /* mbuf pointers array for referencing mbufs linked to RX descriptors */ + /* Mbuf pointers array for referencing mbufs linked to RX descriptors */ rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs", sizeof(*rxq->rxbufs) * nb_desc, RTE_CACHE_LINE_SIZE, socket_id); @@ -95,7 +95,7 @@ nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev, /* * Telling the HW about the physical address of the RX ring and number - * of descriptors in log2 format + * of descriptors in log2 format. */ nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma); nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc)); @@ -140,7 +140,8 @@ nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev, * resizing in later calls to the queue setup function. */ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, - sizeof(struct nfp_net_nfd3_tx_desc) * NFP_NET_MAX_TX_DESC, + sizeof(struct nfp_net_nfd3_tx_desc) * + hw->dev_info->max_qc_size, NFP_MEMZONE_ALIGN, socket_id); if (tz == NULL) { PMD_DRV_LOG(ERR, "Error allocating tx dma"); @@ -152,7 +153,7 @@ nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev, txq->tx_count = nb_desc; txq->tx_free_thresh = tx_free_thresh; - /* queue mapping based on firmware configuration */ + /* Queue mapping based on firmware configuration */ txq->qidx = queue_idx; txq->tx_qcidx = queue_idx * hw->stride_tx; txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx); @@ -163,7 +164,7 @@ nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev, txq->dma = (uint64_t)tz->iova; txq->txds = tz->addr; - /* mbuf pointers array for referencing mbufs linked to TX descriptors */ + /* Mbuf pointers array for referencing mbufs linked to TX descriptors */ txq->txbufs = rte_zmalloc_socket("txq->txbufs", sizeof(*txq->txbufs) * nb_desc, RTE_CACHE_LINE_SIZE, socket_id); @@ -178,7 +179,7 @@ nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev, /* * Telling the HW about the physical address of the TX ring and number - * of descriptors in log2 format + * of descriptors in log2 format. 
*/ nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma); nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc)); @@ -303,6 +304,7 @@ nfp_flower_repr_dev_start(struct rte_eth_dev *dev) { struct nfp_flower_representor *repr; struct nfp_app_fw_flower *app_fw_flower; + uint16_t i; repr = dev->data->dev_private; app_fw_flower = repr->app_fw_flower; @@ -314,6 +316,11 @@ nfp_flower_repr_dev_start(struct rte_eth_dev *dev) nfp_flower_cmsg_port_mod(app_fw_flower, repr->port_id, true); + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; } @@ -322,6 +329,7 @@ nfp_flower_repr_dev_stop(struct rte_eth_dev *dev) { struct nfp_flower_representor *repr; struct nfp_app_fw_flower *app_fw_flower; + uint16_t i; repr = dev->data->dev_private; app_fw_flower = repr->app_fw_flower; @@ -333,6 +341,11 @@ nfp_flower_repr_dev_stop(struct rte_eth_dev *dev) repr->nfp_idx, 0); } + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } @@ -457,7 +470,7 @@ nfp_flower_repr_rx_burst(void *rx_queue, total_dequeue = rte_ring_dequeue_burst(repr->ring, (void *)rx_pkts, nb_pkts, &available); if (total_dequeue != 0) { - PMD_RX_LOG(DEBUG, "Representor Rx burst for %s, port_id: 0x%x, " + PMD_RX_LOG(DEBUG, "Representor Rx burst for %s, port_id: %#x, " "received: %u, available: %u", repr->name, repr->port_id, total_dequeue, available); @@ -503,7 +516,7 @@ nfp_flower_repr_tx_burst(void *tx_queue, pf_tx_queue = dev->data->tx_queues[0]; sent = nfp_flower_pf_xmit_pkts(pf_tx_queue, tx_pkts, nb_pkts); if (sent != 0) { - PMD_TX_LOG(DEBUG, "Representor Tx burst for %s, port_id: 0x%x transmitted: %u", + PMD_TX_LOG(DEBUG, "Representor Tx burst for %s, port_id: %#x transmitted: %hu", repr->name, repr->port_id, sent); repr->repr_stats.opackets += sent; } @@ -596,7 +609,7 @@ nfp_flower_pf_repr_init(struct rte_eth_dev *eth_dev, /* Memory has been allocated in the eth_dev_create() function */ repr = eth_dev->data->dev_private; - /* Copy data here from the input representor template*/ + /* Copy data here from the input representor template */ repr->vf_id = init_repr_data->vf_id; repr->switch_domain_id = init_repr_data->switch_domain_id; repr->repr_type = init_repr_data->repr_type; @@ -639,6 +652,7 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev, void *init_params) { int ret; + uint16_t index; unsigned int numa_node; char ring_name[RTE_ETH_NAME_MAX_LEN]; struct nfp_app_fw_flower *app_fw_flower; @@ -665,7 +679,7 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev, return -ENOMEM; } - /* Copy data here from the input representor template*/ + /* Copy data here from the input representor template */ repr->vf_id = init_repr_data->vf_id; repr->switch_domain_id = init_repr_data->switch_domain_id; repr->port_id = init_repr_data->port_id; @@ -712,10 +726,13 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev, } /* Add repr to correct array */ - if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) - app_fw_flower->phy_reprs[repr->nfp_idx] = repr; - else - app_fw_flower->vf_reprs[repr->vf_id] = repr; + if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) { + index = NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM(repr->port_id); + app_fw_flower->phy_reprs[index] = repr; + } else { + index = repr->vf_id; + 
app_fw_flower->vf_reprs[index] = repr; + } return 0; @@ -745,7 +762,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) nfp_eth_table = app_fw_flower->pf_hw->pf_dev->nfp_eth_table; eth_dev = app_fw_flower->ctrl_hw->eth_dev; - /* Send a NFP_FLOWER_CMSG_TYPE_MAC_REPR cmsg to hardware*/ + /* Send a NFP_FLOWER_CMSG_TYPE_MAC_REPR cmsg to hardware */ ret = nfp_flower_cmsg_mac_repr(app_fw_flower); if (ret != 0) { PMD_INIT_LOG(ERR, "Cloud not send mac repr cmsgs"); @@ -788,8 +805,8 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) "%s_repr_p%d", pci_name, i); /* - * Create a eth_dev for this representor - * This will also allocate private memory for the device + * Create a eth_dev for this representor. + * This will also allocate private memory for the device. */ ret = rte_eth_dev_create(eth_dev->device, flower_repr.name, sizeof(struct nfp_flower_representor), @@ -805,7 +822,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) /* * Now allocate eth_dev's for VF representors. - * Also send reify messages + * Also send reify messages. */ for (i = 0; i < app_fw_flower->num_vf_reprs; i++) { flower_repr.repr_type = NFP_REPR_TYPE_VF; @@ -819,7 +836,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) snprintf(flower_repr.name, sizeof(flower_repr.name), "%s_repr_vf%d", pci_name, i); - /* This will also allocate private memory for the device*/ + /* This will also allocate private memory for the device */ ret = rte_eth_dev_create(eth_dev->device, flower_repr.name, sizeof(struct nfp_flower_representor), NULL, NULL, nfp_flower_repr_init, &flower_repr); diff --git a/drivers/net/nfp/flower/nfp_flower_representor.h b/drivers/net/nfp/flower/nfp_flower_representor.h index 685cbe46b4e..bcb4c3cdb58 100644 --- a/drivers/net/nfp/flower/nfp_flower_representor.h +++ b/drivers/net/nfp/flower/nfp_flower_representor.h @@ -3,31 +3,17 @@ * All rights reserved. */ -#ifndef _NFP_FLOWER_REPRESENTOR_H_ -#define _NFP_FLOWER_REPRESENTOR_H_ +#ifndef __NFP_FLOWER_REPRESENTOR_H__ +#define __NFP_FLOWER_REPRESENTOR_H__ #include "nfp_flower.h" -/* - * enum nfp_repr_type - type of representor - * @NFP_REPR_TYPE_PHYS_PORT: external NIC port - * @NFP_REPR_TYPE_PF: physical function - * @NFP_REPR_TYPE_VF: virtual function - * @NFP_REPR_TYPE_MAX: number of representor types - */ -enum nfp_repr_type { - NFP_REPR_TYPE_PHYS_PORT = 0, - NFP_REPR_TYPE_PF, - NFP_REPR_TYPE_VF, - NFP_REPR_TYPE_MAX, -}; - struct nfp_flower_representor { uint16_t vf_id; uint16_t switch_domain_id; uint32_t repr_type; uint32_t port_id; - uint32_t nfp_idx; /* only valid for the repr of physical port */ + uint32_t nfp_idx; /**< Only valid for the repr of physical port */ char name[RTE_ETH_NAME_MAX_LEN]; struct rte_ether_addr mac_addr; struct nfp_app_fw_flower *app_fw_flower; @@ -38,4 +24,4 @@ struct nfp_flower_representor { int nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower); -#endif /* _NFP_FLOWER_REPRESENTOR_H_ */ +#endif /* __NFP_FLOWER_REPRESENTOR_H__ */ diff --git a/drivers/net/nfp/meson.build b/drivers/net/nfp/meson.build index 93c708959ce..40e9ef8524c 100644 --- a/drivers/net/nfp/meson.build +++ b/drivers/net/nfp/meson.build @@ -1,38 +1,43 @@ # SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2018 Intel Corporation +# Copyright(c) 2018 Corigine, Inc. 
if not is_linux or not dpdk_conf.get('RTE_ARCH_64') build = false reason = 'only supported on 64-bit Linux' endif + sources = files( + 'flower/nfp_conntrack.c', 'flower/nfp_flower.c', 'flower/nfp_flower_cmsg.c', 'flower/nfp_flower_ctrl.c', 'flower/nfp_flower_representor.c', 'nfd3/nfp_nfd3_dp.c', 'nfdk/nfp_nfdk_dp.c', - 'nfpcore/nfp_cpp_pcie_ops.c', - 'nfpcore/nfp_nsp.c', 'nfpcore/nfp_cppcore.c', - 'nfpcore/nfp_resource.c', + 'nfpcore/nfp_crc.c', + 'nfpcore/nfp_dev.c', + 'nfpcore/nfp_hwinfo.c', 'nfpcore/nfp_mip.c', + 'nfpcore/nfp_mutex.c', 'nfpcore/nfp_nffw.c', - 'nfpcore/nfp_rtsym.c', + 'nfpcore/nfp_nsp.c', 'nfpcore/nfp_nsp_cmds.c', - 'nfpcore/nfp_crc.c', - 'nfpcore/nfp_mutex.c', 'nfpcore/nfp_nsp_eth.c', - 'nfpcore/nfp_hwinfo.c', + 'nfpcore/nfp_resource.c', + 'nfpcore/nfp_rtsym.c', 'nfpcore/nfp_target.c', + 'nfpcore/nfp6000_pcie.c', 'nfp_common.c', - 'nfp_rxtx.c', 'nfp_cpp_bridge.c', - 'nfp_ethdev_vf.c', + 'nfp_ctrl.c', 'nfp_ethdev.c', + 'nfp_ethdev_vf.c', 'nfp_flow.c', + 'nfp_ipsec.c', 'nfp_logs.c', 'nfp_mtr.c', + 'nfp_rxtx.c', ) -deps += ['hash'] +deps += ['hash', 'security'] diff --git a/drivers/net/nfp/nfd3/nfp_nfd3.h b/drivers/net/nfp/nfd3/nfp_nfd3.h index 910e622fa24..3ba562cc3fc 100644 --- a/drivers/net/nfp/nfd3/nfp_nfd3.h +++ b/drivers/net/nfp/nfd3/nfp_nfd3.h @@ -3,46 +3,38 @@ * All rights reserved. */ -#ifndef _NFP_NFD3_H_ -#define _NFP_NFD3_H_ +#ifndef __NFP_NFD3_H__ +#define __NFP_NFD3_H__ + +#include "../nfp_rxtx.h" /* TX descriptor format */ #define NFD3_DESC_TX_EOP RTE_BIT32(7) #define NFD3_DESC_TX_OFFSET_MASK (0x7F) /* [0,6] */ -/* Flags in the host TX descriptor */ -#define NFD3_DESC_TX_CSUM RTE_BIT32(7) -#define NFD3_DESC_TX_IP4_CSUM RTE_BIT32(6) -#define NFD3_DESC_TX_TCP_CSUM RTE_BIT32(5) -#define NFD3_DESC_TX_UDP_CSUM RTE_BIT32(4) -#define NFD3_DESC_TX_VLAN RTE_BIT32(3) -#define NFD3_DESC_TX_LSO RTE_BIT32(2) -#define NFD3_DESC_TX_ENCAP RTE_BIT32(1) -#define NFD3_DESC_TX_O_IP4_CSUM RTE_BIT32(0) - #define NFD3_TX_DESC_PER_PKT 1 struct nfp_net_nfd3_tx_desc { union { struct { - uint8_t dma_addr_hi; /* High bits of host buf address */ - uint16_t dma_len; /* Length to DMA for this desc */ - /* Offset in buf where pkt starts + highest bit is eop flag */ + uint8_t dma_addr_hi; /**< High bits of host buf address */ + uint16_t dma_len; /**< Length to DMA for this desc */ + /** Offset in buf where pkt starts + highest bit is eop flag */ uint8_t offset_eop; - uint32_t dma_addr_lo; /* Low 32bit of host buf addr */ + uint32_t dma_addr_lo; /**< Low 32bit of host buf addr */ - uint16_t mss; /* MSS to be used for LSO */ - uint8_t lso_hdrlen; /* LSO, where the data starts */ - uint8_t flags; /* TX Flags, see @NFD3_DESC_TX_* */ + uint16_t mss; /**< MSS to be used for LSO */ + uint8_t lso_hdrlen; /**< LSO, where the data starts */ + uint8_t flags; /**< TX Flags, see @NFD3_DESC_TX_* */ union { struct { - uint8_t l3_offset; /* L3 header offset */ - uint8_t l4_offset; /* L4 header offset */ + uint8_t l3_offset; /**< L3 header offset */ + uint8_t l4_offset; /**< L4 header offset */ }; - uint16_t vlan; /* VLAN tag to add if indicated */ + uint16_t vlan; /**< VLAN tag to add if indicated */ }; - uint16_t data_len; /* Length of frame + meta data */ + uint16_t data_len; /**< Length of frame + meta data */ } __rte_packed; uint32_t vals[4]; }; @@ -62,13 +54,14 @@ nfp_net_nfd3_free_tx_desc(struct nfp_net_txq *txq) return (free_desc > 8) ? 
(free_desc - 8) : 0; } -/* - * nfp_net_nfd3_txq_full() - Check if the TX queue free descriptors - * is below tx_free_threshold for firmware of nfd3 - * - * @txq: TX queue to check +/** + * Check if the TX queue free descriptors is below tx_free_threshold + * for firmware with nfd3 * * This function uses the host copy* of read/write pointers. + * + * @param txq + * TX queue to check */ static inline bool nfp_net_nfd3_txq_full(struct nfp_net_txq *txq) @@ -76,83 +69,6 @@ nfp_net_nfd3_txq_full(struct nfp_net_txq *txq) return (nfp_net_nfd3_free_tx_desc(txq) < txq->tx_free_thresh); } -/* nfp_net_nfd3_tx_tso() - Set NFD3 TX descriptor for TSO */ -static inline void -nfp_net_nfd3_tx_tso(struct nfp_net_txq *txq, - struct nfp_net_nfd3_tx_desc *txd, - struct rte_mbuf *mb) -{ - uint64_t ol_flags; - struct nfp_net_hw *hw = txq->hw; - - if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) == 0) - goto clean_txd; - - ol_flags = mb->ol_flags; - if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0) - goto clean_txd; - - txd->l3_offset = mb->l2_len; - txd->l4_offset = mb->l2_len + mb->l3_len; - txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len; - - if ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) != 0) { - txd->l3_offset += mb->outer_l2_len + mb->outer_l3_len; - txd->l4_offset += mb->outer_l2_len + mb->outer_l3_len; - txd->lso_hdrlen += mb->outer_l2_len + mb->outer_l3_len; - } - - txd->mss = rte_cpu_to_le_16(mb->tso_segsz); - txd->flags = NFD3_DESC_TX_LSO; - - return; - -clean_txd: - txd->flags = 0; - txd->l3_offset = 0; - txd->l4_offset = 0; - txd->lso_hdrlen = 0; - txd->mss = 0; -} - -/* nfp_net_nfd3_tx_cksum() - Set TX CSUM offload flags in NFD3 TX descriptor */ -static inline void -nfp_net_nfd3_tx_cksum(struct nfp_net_txq *txq, - struct nfp_net_nfd3_tx_desc *txd, - struct rte_mbuf *mb) -{ - uint64_t ol_flags; - struct nfp_net_hw *hw = txq->hw; - - if ((hw->cap & NFP_NET_CFG_CTRL_TXCSUM) == 0) - return; - - ol_flags = mb->ol_flags; - - /* Set TCP csum offload if TSO enabled. */ - if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0) - txd->flags |= NFD3_DESC_TX_TCP_CSUM; - - /* IPv6 does not need checksum */ - if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0) - txd->flags |= NFD3_DESC_TX_IP4_CSUM; - - if ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) != 0) - txd->flags |= NFD3_DESC_TX_ENCAP; - - switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) { - case RTE_MBUF_F_TX_UDP_CKSUM: - txd->flags |= NFD3_DESC_TX_UDP_CSUM; - break; - case RTE_MBUF_F_TX_TCP_CKSUM: - txd->flags |= NFD3_DESC_TX_TCP_CSUM; - break; - } - - if ((ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK)) != 0) - txd->flags |= NFD3_DESC_TX_CSUM; -} - uint32_t nfp_flower_nfd3_pkt_add_metadata(struct rte_mbuf *mbuf, uint32_t port_id); uint16_t nfp_net_nfd3_xmit_pkts_common(void *tx_queue, @@ -168,4 +84,4 @@ int nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, unsigned int socket_id, const struct rte_eth_txconf *tx_conf); -#endif /* _NFP_NFD3_H_ */ +#endif /* __NFP_NFD3_H__ */ diff --git a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c index ee39686329b..b64dbaca0df 100644 --- a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c +++ b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c @@ -3,16 +3,100 @@ * All rights reserved. 
*/ -#include +#include "nfp_nfd3.h" + #include #include -#include "../nfp_logs.h" -#include "../nfp_common.h" -#include "../nfp_rxtx.h" #include "../flower/nfp_flower.h" -#include "../flower/nfp_flower_cmsg.h" -#include "nfp_nfd3.h" +#include "../nfp_logs.h" + +/* Flags in the host TX descriptor */ +#define NFD3_DESC_TX_CSUM RTE_BIT32(7) +#define NFD3_DESC_TX_IP4_CSUM RTE_BIT32(6) +#define NFD3_DESC_TX_TCP_CSUM RTE_BIT32(5) +#define NFD3_DESC_TX_UDP_CSUM RTE_BIT32(4) +#define NFD3_DESC_TX_VLAN RTE_BIT32(3) +#define NFD3_DESC_TX_LSO RTE_BIT32(2) +#define NFD3_DESC_TX_ENCAP RTE_BIT32(1) +#define NFD3_DESC_TX_O_IP4_CSUM RTE_BIT32(0) + +/* Set NFD3 TX descriptor for TSO */ +static void +nfp_net_nfd3_tx_tso(struct nfp_net_txq *txq, + struct nfp_net_nfd3_tx_desc *txd, + struct rte_mbuf *mb) +{ + uint64_t ol_flags; + struct nfp_net_hw *hw = txq->hw; + + if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) == 0) + goto clean_txd; + + ol_flags = mb->ol_flags; + if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0) + goto clean_txd; + + txd->l3_offset = mb->l2_len; + txd->l4_offset = mb->l2_len + mb->l3_len; + txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len; + + if ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) != 0) { + txd->l3_offset += mb->outer_l2_len + mb->outer_l3_len; + txd->l4_offset += mb->outer_l2_len + mb->outer_l3_len; + txd->lso_hdrlen += mb->outer_l2_len + mb->outer_l3_len; + } + + txd->mss = rte_cpu_to_le_16(mb->tso_segsz); + txd->flags = NFD3_DESC_TX_LSO; + + return; + +clean_txd: + txd->flags = 0; + txd->l3_offset = 0; + txd->l4_offset = 0; + txd->lso_hdrlen = 0; + txd->mss = 0; +} + +/* Set TX CSUM offload flags in NFD3 TX descriptor */ +static void +nfp_net_nfd3_tx_cksum(struct nfp_net_txq *txq, + struct nfp_net_nfd3_tx_desc *txd, + struct rte_mbuf *mb) +{ + uint64_t ol_flags; + struct nfp_net_hw *hw = txq->hw; + + if ((hw->cap & NFP_NET_CFG_CTRL_TXCSUM) == 0) + return; + + ol_flags = mb->ol_flags; + + /* Set TCP csum offload if TSO enabled. 
*/ + if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0) + txd->flags |= NFD3_DESC_TX_TCP_CSUM; + + /* IPv6 does not need checksum */ + if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0) + txd->flags |= NFD3_DESC_TX_IP4_CSUM; + + if ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) != 0) + txd->flags |= NFD3_DESC_TX_ENCAP; + + switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) { + case RTE_MBUF_F_TX_UDP_CKSUM: + txd->flags |= NFD3_DESC_TX_UDP_CSUM; + break; + case RTE_MBUF_F_TX_TCP_CKSUM: + txd->flags |= NFD3_DESC_TX_TCP_CSUM; + break; + } + + if ((ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK)) != 0) + txd->flags |= NFD3_DESC_TX_CSUM; +} uint32_t nfp_flower_nfd3_pkt_add_metadata(struct rte_mbuf *mbuf, @@ -29,14 +113,12 @@ nfp_flower_nfd3_pkt_add_metadata(struct rte_mbuf *mbuf, } /* - * nfp_net_nfd3_tx_vlan() - Set vlan info in the nfd3 tx desc + * Set vlan info in the nfd3 tx desc * * If enable NFP_NET_CFG_CTRL_TXVLAN_V2 - * Vlan_info is stored in the meta and - * is handled in the nfp_net_nfd3_set_meta_vlan() + * Vlan_info is stored in the meta and is handled in the @nfp_net_nfd3_set_meta_vlan() * else if enable NFP_NET_CFG_CTRL_TXVLAN - * Vlan_info is stored in the tx_desc and - * is handled in the nfp_net_nfd3_tx_vlan() + * Vlan_info is stored in the tx_desc and is handled in the @nfp_net_nfd3_tx_vlan() */ static inline void nfp_net_nfd3_tx_vlan(struct nfp_net_txq *txq, @@ -63,10 +145,13 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data, char *meta; uint8_t layer = 0; uint32_t meta_info; + uint32_t cap_extend; struct nfp_net_hw *hw; uint8_t vlan_layer = 0; + uint8_t ipsec_layer = 0; hw = txq->hw; + cap_extend = hw->cap_ext; if ((pkt->ol_flags & RTE_MBUF_F_TX_VLAN) != 0 && (hw->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0) { @@ -76,6 +161,18 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data, meta_data->header |= NFP_NET_META_VLAN; } + if ((pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) != 0 && + (cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) { + uint32_t ipsec_type = NFP_NET_META_IPSEC | + NFP_NET_META_IPSEC << NFP_NET_META_FIELD_SIZE | + NFP_NET_META_IPSEC << (2 * NFP_NET_META_FIELD_SIZE); + if (meta_data->length == 0) + meta_data->length = NFP_NET_META_FIELD_SIZE; + uint8_t ipsec_offset = meta_data->length - NFP_NET_META_FIELD_SIZE; + meta_data->header |= (ipsec_type << ipsec_offset); + meta_data->length += 3 * NFP_NET_META_FIELD_SIZE; + } + if (meta_data->length == 0) return; @@ -96,6 +193,15 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data, nfp_net_set_meta_vlan(meta_data, pkt, layer); vlan_layer++; break; + case NFP_NET_META_IPSEC: + if (ipsec_layer > 2) { + PMD_DRV_LOG(ERR, "At most 3 layers of ipsec is supported for now."); + return; + } + + nfp_net_set_meta_ipsec(meta_data, txq, pkt, layer, ipsec_layer); + ipsec_layer++; + break; default: PMD_DRV_LOG(ERR, "The metadata type not supported"); return; @@ -119,14 +225,14 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue, uint16_t nb_pkts, bool repr_flag) { - int i; - int pkt_size; - int dma_size; + uint16_t i; uint8_t offset; + uint32_t pkt_size; + uint16_t dma_size; uint64_t dma_addr; uint16_t free_descs; - uint16_t issued_descs; struct rte_mbuf *pkt; + uint16_t issued_descs; struct nfp_net_hw *hw; struct rte_mbuf **lmbuf; struct nfp_net_txq *txq; @@ -191,9 +297,9 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue, nfp_net_nfd3_tx_vlan(txq, &txd, pkt); /* - * mbuf data_len is the data in one segment and pkt_len data + * Mbuf data_len is the data in one segment and pkt_len data * in the whole packet. 
When the packet is just one segment, - * then data_len = pkt_len + * then data_len = pkt_len. */ pkt_size = pkt->pkt_len; @@ -207,7 +313,7 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue, /* * Linking mbuf with descriptor for being released - * next time descriptor is used + * next time descriptor is used. */ *lmbuf = pkt; @@ -222,14 +328,14 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue, free_descs--; txq->wr_p++; - if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping */ + if (unlikely(txq->wr_p == txq->tx_count)) /* Wrapping */ txq->wr_p = 0; pkt_size -= dma_size; /* * Making the EOP, packets with just one segment - * the priority + * the priority. */ if (likely(pkt_size == 0)) txds->offset_eop = NFD3_DESC_TX_EOP; @@ -262,7 +368,6 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, unsigned int socket_id, const struct rte_eth_txconf *tx_conf) { - int ret; size_t size; uint32_t tx_desc_sz; uint16_t min_tx_desc; @@ -274,11 +379,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - PMD_INIT_FUNC_TRACE(); - - ret = nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc); - if (ret != 0) - return ret; + nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc); /* Validating number of descriptors */ tx_desc_sz = nb_desc * sizeof(struct nfp_net_nfd3_tx_desc); @@ -336,7 +437,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, txq->tx_count = nb_desc * NFD3_TX_DESC_PER_PKT; txq->tx_free_thresh = tx_free_thresh; - /* queue mapping based on firmware configuration */ + /* Queue mapping based on firmware configuration */ txq->qidx = queue_idx; txq->tx_qcidx = queue_idx * hw->stride_tx; txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx); @@ -346,7 +447,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, txq->dma = tz->iova; txq->txds = tz->addr; - /* mbuf pointers array for referencing mbufs linked to TX descriptors */ + /* Mbuf pointers array for referencing mbufs linked to TX descriptors */ txq->txbufs = rte_zmalloc_socket("txq->txbufs", sizeof(*txq->txbufs) * txq->tx_count, RTE_CACHE_LINE_SIZE, socket_id); @@ -362,7 +463,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, /* * Telling the HW about the physical address of the TX ring and number - * of descriptors in log2 format + * of descriptors in log2 format. */ nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma); nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(txq->tx_count)); diff --git a/drivers/net/nfp/nfdk/nfp_nfdk.h b/drivers/net/nfp/nfdk/nfp_nfdk.h index 66f020efb09..2767fd51cd1 100644 --- a/drivers/net/nfp/nfdk/nfp_nfdk.h +++ b/drivers/net/nfp/nfdk/nfp_nfdk.h @@ -3,11 +3,12 @@ * All rights reserved. 
*/ -#ifndef _NFP_NFDK_H_ -#define _NFP_NFDK_H_ +#ifndef __NFP_NFDK_H__ +#define __NFP_NFDK_H__ + +#include "../nfp_rxtx.h" #define NFDK_TX_DESC_PER_SIMPLE_PKT 2 -#define NFDK_TX_DESC_GATHER_MAX 17 #define NFDK_TX_MAX_DATA_PER_HEAD 0x00001000 /* 4K */ #define NFDK_TX_MAX_DATA_PER_DESC 0x00004000 /* 16K */ @@ -16,7 +17,6 @@ /* The mask of 'dma_len_xx' of address descriptor */ #define NFDK_DESC_TX_DMA_LEN_HEAD 0x0FFF /* [0,11] */ #define NFDK_DESC_TX_DMA_LEN 0x3FFF /* [0,13] */ -#define NFDK_DESC_TX_TYPE_HEAD 0xF000 /* [12,15] */ /* The mask of upper 4 bit of first address descriptor */ #define NFDK_DESC_TX_TYPE_HEAD 0xF000 /* [12,15] */ @@ -75,7 +75,7 @@ * dma_addr_hi - bits [47:32] of host memory address * dma_addr_lo - bits [31:0] of host memory address * - * --> metadata descriptor + * --> Metadata descriptor * Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 * -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * Word +-------+-----------------------+---------------------+---+-----+ @@ -104,27 +104,27 @@ */ struct nfp_net_nfdk_tx_desc { union { - /* Address descriptor */ + /** Address descriptor */ struct { - uint16_t dma_addr_hi; /* High bits of host buf address */ - uint16_t dma_len_type; /* Length to DMA for this desc */ - uint32_t dma_addr_lo; /* Low 32bit of host buf addr */ + uint16_t dma_addr_hi; /**< High bits of host buf address */ + uint16_t dma_len_type; /**< Length to DMA for this desc */ + uint32_t dma_addr_lo; /**< Low 32bit of host buf addr */ }; - /* TSO descriptor */ + /** TSO descriptor */ struct { - uint16_t mss; /* MSS to be used for LSO */ - uint8_t lso_hdrlen; /* LSO, TCP payload offset */ - uint8_t lso_totsegs; /* LSO, total segments */ - uint8_t l3_offset; /* L3 header offset */ - uint8_t l4_offset; /* L4 header offset */ - uint16_t lso_meta_res; /* Rsvd bits in TSO metadata */ + uint16_t mss; /**< MSS to be used for LSO */ + uint8_t lso_hdrlen; /**< LSO, TCP payload offset */ + uint8_t lso_totsegs; /**< LSO, total segments */ + uint8_t l3_offset; /**< L3 header offset */ + uint8_t l4_offset; /**< L4 header offset */ + uint16_t lso_meta_res; /**< Rsvd bits in TSO metadata */ }; - /* Metadata descriptor */ + /** Metadata descriptor */ struct { - uint8_t flags; /* TX Flags, see @NFDK_DESC_TX_* */ - uint8_t reserved[7]; /* meta byte placeholder */ + uint8_t flags; /**< TX Flags, see @NFDK_DESC_TX_* */ + uint8_t reserved[7]; /**< Meta byte place holder */ }; uint32_t vals[2]; @@ -143,16 +143,17 @@ nfp_net_nfdk_free_tx_desc(struct nfp_net_txq *txq) free_desc = txq->rd_p - txq->wr_p; return (free_desc > NFDK_TX_DESC_STOP_CNT) ? - (free_desc - NFDK_TX_DESC_STOP_CNT) : 0; + (free_desc - NFDK_TX_DESC_STOP_CNT) : 0; } -/* - * nfp_net_nfdk_txq_full() - Check if the TX queue free descriptors - * is below tx_free_threshold for firmware of nfdk - * - * @txq: TX queue to check +/** + * Check if the TX queue free descriptors is below tx_free_threshold + * for firmware of nfdk * * This function uses the host copy* of read/write pointers. 
+ * + * @param txq + * TX queue to check */ static inline bool nfp_net_nfdk_txq_full(struct nfp_net_txq *txq) @@ -160,73 +161,6 @@ nfp_net_nfdk_txq_full(struct nfp_net_txq *txq) return (nfp_net_nfdk_free_tx_desc(txq) < txq->tx_free_thresh); } -/* nfp_net_nfdk_tx_cksum() - Set TX CSUM offload flags in TX descriptor of nfdk */ -static inline uint64_t -nfp_net_nfdk_tx_cksum(struct nfp_net_txq *txq, - struct rte_mbuf *mb, - uint64_t flags) -{ - uint64_t ol_flags; - struct nfp_net_hw *hw = txq->hw; - - if ((hw->cap & NFP_NET_CFG_CTRL_TXCSUM) == 0) - return flags; - - ol_flags = mb->ol_flags; - - /* Set TCP csum offload if TSO enabled. */ - if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0) - flags |= NFDK_DESC_TX_L4_CSUM; - - if ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) != 0) - flags |= NFDK_DESC_TX_ENCAP; - - /* IPv6 does not need checksum */ - if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0) - flags |= NFDK_DESC_TX_L3_CSUM; - - if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != 0) - flags |= NFDK_DESC_TX_L4_CSUM; - - return flags; -} - -/* nfp_net_nfdk_tx_tso() - Set TX descriptor for TSO of nfdk */ -static inline uint64_t -nfp_net_nfdk_tx_tso(struct nfp_net_txq *txq, - struct rte_mbuf *mb) -{ - uint8_t outer_len; - uint64_t ol_flags; - struct nfp_net_nfdk_tx_desc txd; - struct nfp_net_hw *hw = txq->hw; - - txd.raw = 0; - - if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) == 0) - return txd.raw; - - ol_flags = mb->ol_flags; - if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0) - return txd.raw; - - txd.l3_offset = mb->l2_len; - txd.l4_offset = mb->l2_len + mb->l3_len; - txd.lso_meta_res = 0; - txd.mss = rte_cpu_to_le_16(mb->tso_segsz); - txd.lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len; - txd.lso_totsegs = (mb->pkt_len + mb->tso_segsz) / mb->tso_segsz; - - if ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) != 0) { - outer_len = mb->outer_l2_len + mb->outer_l3_len; - txd.l3_offset += outer_len; - txd.l4_offset += outer_len; - txd.lso_hdrlen += outer_len; - } - - return txd.raw; -} - uint32_t nfp_flower_nfdk_pkt_add_metadata(struct rte_mbuf *mbuf, uint32_t port_id); uint16_t nfp_net_nfdk_xmit_pkts_common(void *tx_queue, @@ -244,4 +178,4 @@ int nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev, int nfp_net_nfdk_tx_maybe_close_block(struct nfp_net_txq *txq, struct rte_mbuf *pkt); -#endif /* _NFP_NFDK_H_ */ +#endif /* __NFP_NFDK_H__ */ diff --git a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c index f66802211f5..259039964ab 100644 --- a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c +++ b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c @@ -3,18 +3,83 @@ * All rights reserved. */ -#include +#include "nfp_nfdk.h" + #include #include -#include "../nfp_logs.h" -#include "../nfp_common.h" -#include "../nfp_rxtx.h" -#include "../nfpcore/nfp_mip.h" -#include "../nfpcore/nfp_rtsym.h" #include "../flower/nfp_flower.h" -#include "../flower/nfp_flower_cmsg.h" -#include "nfp_nfdk.h" +#include "../nfpcore/nfp_platform.h" +#include "../nfp_logs.h" + +#define NFDK_TX_DESC_GATHER_MAX 17 + +/* Set TX CSUM offload flags in TX descriptor of nfdk */ +static uint64_t +nfp_net_nfdk_tx_cksum(struct nfp_net_txq *txq, + struct rte_mbuf *mb, + uint64_t flags) +{ + uint64_t ol_flags; + struct nfp_net_hw *hw = txq->hw; + + if ((hw->cap & NFP_NET_CFG_CTRL_TXCSUM) == 0) + return flags; + + ol_flags = mb->ol_flags; + + /* Set TCP csum offload if TSO enabled. 
*/ + if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0) + flags |= NFDK_DESC_TX_L4_CSUM; + + if ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) != 0) + flags |= NFDK_DESC_TX_ENCAP; + + /* IPv6 does not need checksum */ + if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0) + flags |= NFDK_DESC_TX_L3_CSUM; + + if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != 0) + flags |= NFDK_DESC_TX_L4_CSUM; + + return flags; +} + +/* Set TX descriptor for TSO of nfdk */ +static uint64_t +nfp_net_nfdk_tx_tso(struct nfp_net_txq *txq, + struct rte_mbuf *mb) +{ + uint8_t outer_len; + uint64_t ol_flags; + struct nfp_net_nfdk_tx_desc txd; + struct nfp_net_hw *hw = txq->hw; + + txd.raw = 0; + + if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) == 0) + return txd.raw; + + ol_flags = mb->ol_flags; + if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0) + return txd.raw; + + txd.l3_offset = mb->l2_len; + txd.l4_offset = mb->l2_len + mb->l3_len; + txd.lso_meta_res = 0; + txd.mss = rte_cpu_to_le_16(mb->tso_segsz); + txd.lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len; + txd.lso_totsegs = (mb->pkt_len + mb->tso_segsz) / mb->tso_segsz; + + if ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) != 0) { + outer_len = mb->outer_l2_len + mb->outer_l3_len; + txd.l3_offset += outer_len; + txd.l4_offset += outer_len; + txd.lso_hdrlen += outer_len; + } + + return txd.raw; +} uint32_t nfp_flower_nfdk_pkt_add_metadata(struct rte_mbuf *mbuf, @@ -36,9 +101,7 @@ static inline uint16_t nfp_net_nfdk_headlen_to_segs(uint16_t headlen) { /* First descriptor fits less data, so adjust for that */ - return DIV_ROUND_UP(headlen + - NFDK_TX_MAX_DATA_PER_DESC - - NFDK_TX_MAX_DATA_PER_HEAD, + return DIV_ROUND_UP(headlen + NFDK_TX_MAX_DATA_PER_DESC - NFDK_TX_MAX_DATA_PER_HEAD, NFDK_TX_MAX_DATA_PER_DESC); } @@ -112,13 +175,16 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, char *meta; uint8_t layer = 0; uint32_t meta_type; + uint32_t cap_extend; struct nfp_net_hw *hw; uint32_t header_offset; uint8_t vlan_layer = 0; + uint8_t ipsec_layer = 0; struct nfp_net_meta_raw meta_data; memset(&meta_data, 0, sizeof(meta_data)); hw = txq->hw; + cap_extend = hw->cap_ext; if ((pkt->ol_flags & RTE_MBUF_F_TX_VLAN) != 0 && (hw->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0) { @@ -128,6 +194,18 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, meta_data.header |= NFP_NET_META_VLAN; } + if ((pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) != 0 && + (cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) { + uint32_t ipsec_type = NFP_NET_META_IPSEC | + NFP_NET_META_IPSEC << NFP_NET_META_FIELD_SIZE | + NFP_NET_META_IPSEC << (2 * NFP_NET_META_FIELD_SIZE); + if (meta_data.length == 0) + meta_data.length = NFP_NET_META_FIELD_SIZE; + uint8_t ipsec_offset = meta_data.length - NFP_NET_META_FIELD_SIZE; + meta_data.header |= (ipsec_type << ipsec_offset); + meta_data.length += 3 * NFP_NET_META_FIELD_SIZE; + } + if (meta_data.length == 0) return; @@ -150,6 +228,15 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, nfp_net_set_meta_vlan(&meta_data, pkt, layer); vlan_layer++; break; + case NFP_NET_META_IPSEC: + if (ipsec_layer > 2) { + PMD_DRV_LOG(ERR, "At most 3 layers of ipsec is supported for now."); + return; + } + + nfp_net_set_meta_ipsec(&meta_data, txq, pkt, layer, ipsec_layer); + ipsec_layer++; + break; default: PMD_DRV_LOG(ERR, "The metadata type not supported"); return; @@ -265,8 +352,10 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue, * than packet head len. 
*/ if (dma_len > NFDK_DESC_TX_DMA_LEN_HEAD) - dma_len = NFDK_DESC_TX_DMA_LEN_HEAD; - dlen_type = dma_len | (NFDK_DESC_TX_TYPE_HEAD & (type << 12)); + tmp_dlen = NFDK_DESC_TX_DMA_LEN_HEAD; + else + tmp_dlen = dma_len; + dlen_type = tmp_dlen | (NFDK_DESC_TX_TYPE_HEAD & (type << 12)); ktxds->dma_len_type = rte_cpu_to_le_16(dlen_type); dma_addr = rte_mbuf_data_iova(pkt); ktxds->dma_addr_hi = rte_cpu_to_le_16(dma_addr >> 32); @@ -277,7 +366,6 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue, * Preserve the original dlen_type, this way below the EOP logic * can use dlen_type. */ - tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD; dma_len -= tmp_dlen; dma_addr += tmp_dlen + 1; @@ -357,7 +445,6 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev, unsigned int socket_id, const struct rte_eth_txconf *tx_conf) { - int ret; size_t size; uint32_t tx_desc_sz; uint16_t min_tx_desc; @@ -369,11 +456,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev, hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - PMD_INIT_FUNC_TRACE(); - - ret = nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc); - if (ret != 0) - return ret; + nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc); /* Validating number of descriptors */ tx_desc_sz = nb_desc * sizeof(struct nfp_net_nfdk_tx_desc); @@ -396,7 +479,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev, /* * Free memory prior to re-allocation if needed. This is the case after - * calling nfp_net_stop + * calling nfp_net_stop(). */ if (dev->data->tx_queues[queue_idx] != NULL) { PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", @@ -431,7 +514,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev, txq->tx_count = nb_desc * NFDK_TX_DESC_PER_SIMPLE_PKT; txq->tx_free_thresh = tx_free_thresh; - /* queue mapping based on firmware configuration */ + /* Queue mapping based on firmware configuration */ txq->qidx = queue_idx; txq->tx_qcidx = queue_idx * hw->stride_tx; txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx); @@ -441,7 +524,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev, txq->dma = tz->iova; txq->ktxds = tz->addr; - /* mbuf pointers array for referencing mbufs linked to TX descriptors */ + /* Mbuf pointers array for referencing mbufs linked to TX descriptors */ txq->txbufs = rte_zmalloc_socket("txq->txbufs", sizeof(*txq->txbufs) * txq->tx_count, RTE_CACHE_LINE_SIZE, socket_id); @@ -457,7 +540,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev, /* * Telling the HW about the physical address of the TX ring and number - * of descriptors in log2 format + * of descriptors in log2 format. */ nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma); nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(txq->tx_count)); diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c index 5092e5869de..2d4a7635abe 100644 --- a/drivers/net/nfp/nfp_common.c +++ b/drivers/net/nfp/nfp_common.c @@ -5,52 +5,38 @@ * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. 
*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "nfp_common.h" + #include -#include -#include -#include "nfpcore/nfp_cpp.h" -#include "nfpcore/nfp_nffw.h" -#include "nfpcore/nfp_hwinfo.h" +#include "flower/nfp_flower_representor.h" +#include "nfd3/nfp_nfd3.h" +#include "nfdk/nfp_nfdk.h" #include "nfpcore/nfp_mip.h" -#include "nfpcore/nfp_rtsym.h" #include "nfpcore/nfp_nsp.h" +#include "nfp_logs.h" -#include "flower/nfp_flower_representor.h" +#define NFP_TX_MAX_SEG UINT8_MAX +#define NFP_TX_MAX_MTU_SEG 8 -#include "nfp_common.h" -#include "nfp_ctrl.h" -#include "nfp_rxtx.h" -#include "nfp_logs.h" -#include "nfp_cpp_bridge.h" +/* + * This is used by the reconfig protocol. It sets the maximum time waiting in + * milliseconds before a reconfig timeout happens. + */ +#define NFP_NET_POLL_TIMEOUT 5000 -#include "nfd3/nfp_nfd3.h" -#include "nfdk/nfp_nfdk.h" +#define NFP_NET_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ +#define NFP_NET_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ -#include -#include -#include -#include -#include -#include -#include +/* Maximum supported NFP frame size (MTU + layer 2 headers) */ +#define NFP_FRAME_SIZE_MAX 10048 +#define DEFAULT_FLBUF_SIZE 9216 enum nfp_xstat_group { NFP_XSTAT_GROUP_NET, NFP_XSTAT_GROUP_MAC }; + struct nfp_xstat { char name[RTE_ETH_XSTATS_NAME_SIZE]; int offset; @@ -70,7 +56,7 @@ struct nfp_xstat { } static const struct nfp_xstat nfp_net_xstats[] = { - /** + /* * Basic xstats available on both VF and PF. * Note that in case new statistics of group NFP_XSTAT_GROUP_NET * are added to this array, they must appear before any statistics @@ -95,7 +81,7 @@ static const struct nfp_xstat nfp_net_xstats[] = { NFP_XSTAT_NET("bpf_app2_bytes", APP2_BYTES), NFP_XSTAT_NET("bpf_app3_pkts", APP3_FRAMES), NFP_XSTAT_NET("bpf_app3_bytes", APP3_BYTES), - /** + /* * MAC xstats available only on PF. These statistics are not available for VFs as the * PF is not initialized when the VF is initialized as it is still bound to the kernel * driver. As such, the PMD cannot obtain a CPP handle and access the rtsym_table in order @@ -187,9 +173,10 @@ nfp_net_link_speed_rte2nfp(uint16_t speed) } static void -nfp_net_notify_port_speed(struct nfp_net_hw *hw, struct rte_eth_link *link) +nfp_net_notify_port_speed(struct nfp_net_hw *hw, + struct rte_eth_link *link) { - /** + /* * Read the link status from NFP_NET_CFG_STS. If the link is down * then write the link speed NFP_NET_CFG_STS_LINK_RATE_UNKNOWN to * NFP_NET_CFG_STS_NSP_LINK_RATE. @@ -198,36 +185,38 @@ nfp_net_notify_port_speed(struct nfp_net_hw *hw, struct rte_eth_link *link) nn_cfg_writew(hw, NFP_NET_CFG_STS_NSP_LINK_RATE, NFP_NET_CFG_STS_LINK_RATE_UNKNOWN); return; } - /** + + /* * Link is up so write the link speed from the eth_table to * NFP_NET_CFG_STS_NSP_LINK_RATE. 
*/ nn_cfg_writew(hw, NFP_NET_CFG_STS_NSP_LINK_RATE, - nfp_net_link_speed_rte2nfp(link->link_speed)); + nfp_net_link_speed_rte2nfp(link->link_speed)); } /* The length of firmware version string */ #define FW_VER_LEN 32 static int -__nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) +__nfp_net_reconfig(struct nfp_net_hw *hw, + uint32_t update) { - int cnt; + uint32_t cnt; uint32_t new; struct timespec wait; PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...", - hw->qcp_cfg); + hw->qcp_cfg); if (hw->qcp_cfg == NULL) { - PMD_INIT_LOG(ERR, "Bad configuration queue pointer"); + PMD_DRV_LOG(ERR, "Bad configuration queue pointer"); return -ENXIO; } nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1); wait.tv_sec = 0; - wait.tv_nsec = 1000000; + wait.tv_nsec = 1000000; /* 1ms */ PMD_DRV_LOG(DEBUG, "Polling for update ack..."); @@ -236,17 +225,21 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE); if (new == 0) break; - if (new & NFP_NET_CFG_UPDATE_ERR) { - PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new); + + if ((new & NFP_NET_CFG_UPDATE_ERR) != 0) { + PMD_DRV_LOG(ERR, "Reconfig error: %#08x", new); return -1; } + if (cnt >= NFP_NET_POLL_TIMEOUT) { - PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after" - " %dms", update, cnt); + PMD_DRV_LOG(ERR, "Reconfig timeout for %#08x after %u ms", + update, cnt); return -EIO; } - nanosleep(&wait, 0); /* waiting for a 1ms */ + + nanosleep(&wait, 0); /* Waiting for a 1ms */ } + PMD_DRV_LOG(DEBUG, "Ack DONE"); return 0; } @@ -266,10 +259,12 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) * * @return * - (0) if OK to reconfigure the device. - * - (EIO) if I/O err and fail to reconfigure the device. + * - (-EIO) if I/O err and fail to reconfigure the device. */ int -nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update) +nfp_net_reconfig(struct nfp_net_hw *hw, + uint32_t ctrl, + uint32_t update) { int ret; @@ -308,10 +303,12 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update) * * @return * - (0) if OK to reconfigure the device. - * - (EIO) if I/O err and fail to reconfigure the device. + * - (-EIO) if I/O err and fail to reconfigure the device. */ int -nfp_net_ext_reconfig(struct nfp_net_hw *hw, uint32_t ctrl_ext, uint32_t update) +nfp_net_ext_reconfig(struct nfp_net_hw *hw, + uint32_t ctrl_ext, + uint32_t update) { int ret; @@ -335,55 +332,91 @@ nfp_net_ext_reconfig(struct nfp_net_hw *hw, uint32_t ctrl_ext, uint32_t update) return 0; } +/** + * Reconfigure the firmware via the mailbox + * + * @param hw + * Device to reconfigure + * @param mbox_cmd + * The value for the mailbox command + * + * @return + * - (0) if OK to reconfigure by the mailbox. + * - (-EIO) if I/O err and fail to reconfigure by the mailbox + */ +int +nfp_net_mbox_reconfig(struct nfp_net_hw *hw, + uint32_t mbox_cmd) +{ + int ret; + uint32_t mbox; + + mbox = hw->tlv_caps.mbox_off; + + rte_spinlock_lock(&hw->reconfig_lock); + + nn_cfg_writeq(hw, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd); + nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_MBOX); + + rte_wmb(); + + ret = __nfp_net_reconfig(hw, NFP_NET_CFG_UPDATE_MBOX); + + rte_spinlock_unlock(&hw->reconfig_lock); + + if (ret != 0) { + PMD_DRV_LOG(ERR, "Error nfp net mailbox reconfig: mbox=%#08x update=%#08x", + mbox_cmd, NFP_NET_CFG_UPDATE_MBOX); + return -EIO; + } + + return nn_cfg_readl(hw, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET); +} + /* - * Configure an Ethernet device.
This function must be invoked first - * before any other function in the Ethernet API. This function can - * also be re-invoked when a device is in the stopped state. + * Configure an Ethernet device. + * + * This function must be invoked first before any other function in the Ethernet API. + * This function can also be re-invoked when a device is in the stopped state. + * + * A DPDK app sends info about how many queues to use and how those queues + * need to be configured. This is used by the DPDK core and it makes sure no + * more queues than those advertised by the driver are requested. + * This function is called after that internal process. */ int nfp_net_configure(struct rte_eth_dev *dev) { + struct nfp_net_hw *hw; struct rte_eth_conf *dev_conf; struct rte_eth_rxmode *rxmode; struct rte_eth_txmode *txmode; - struct nfp_net_hw *hw; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - - /* - * A DPDK app sends info about how many queues to use and how - * those queues need to be configured. This is used by the - * DPDK core and it makes sure no more queues than those - * advertised by the driver are requested. This function is - * called after that internal process - */ - - PMD_INIT_LOG(DEBUG, "Configure"); - dev_conf = &dev->data->dev_conf; rxmode = &dev_conf->rxmode; txmode = &dev_conf->txmode; - if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) + if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0) rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; /* Checking TX mode */ - if (txmode->mq_mode) { - PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported"); + if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) { + PMD_DRV_LOG(ERR, "TX mq_mode DCB and VMDq not supported"); return -EINVAL; } /* Checking RX mode */ - if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG && - !(hw->cap & NFP_NET_CFG_CTRL_RSS_ANY)) { - PMD_INIT_LOG(INFO, "RSS not supported"); + if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0 && + (hw->cap & NFP_NET_CFG_CTRL_RSS_ANY) == 0) { + PMD_DRV_LOG(ERR, "RSS not supported"); return -EINVAL; } /* Checking MTU set */ if (rxmode->mtu > NFP_FRAME_SIZE_MAX) { - PMD_INIT_LOG(ERR, "MTU (%u) larger than NFP_FRAME_SIZE_MAX (%u) not supported", - rxmode->mtu, NFP_FRAME_SIZE_MAX); + PMD_DRV_LOG(ERR, "MTU (%u) larger than NFP_FRAME_SIZE_MAX (%u)", + rxmode->mtu, NFP_FRAME_SIZE_MAX); return -ERANGE; } @@ -420,7 +453,8 @@ nfp_net_log_device_information(const struct nfp_net_hw *hw) } static inline void -nfp_net_enbable_rxvlan_cap(struct nfp_net_hw *hw, uint32_t *ctrl) +nfp_net_enable_rxvlan_cap(struct nfp_net_hw *hw, + uint32_t *ctrl) { if ((hw->cap & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0) *ctrl |= NFP_NET_CFG_CTRL_RXVLAN_V2; @@ -431,21 +465,21 @@ nfp_net_enbable_rxvlan_cap(struct nfp_net_hw *hw, uint32_t *ctrl) void nfp_net_enable_queues(struct rte_eth_dev *dev) { + uint16_t i; struct nfp_net_hw *hw; - uint64_t enabled_queues = 0; - int i; + uint64_t enabled_queues; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); /* Enabling the required TX queues in the device */ + enabled_queues = 0; for (i = 0; i < dev->data->nb_tx_queues; i++) enabled_queues |= (1 << i); nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues); - enabled_queues = 0; - /* Enabling the required RX queues in the device */ + enabled_queues = 0; for (i = 0; i < dev->data->nb_rx_queues; i++) enabled_queues |= (1 << i); @@ -455,8 +489,9 @@ nfp_net_enable_queues(struct rte_eth_dev *dev) void nfp_net_disable_queues(struct rte_eth_dev *dev) { + uint32_t update; + uint32_t new_ctrl; struct nfp_net_hw *hw; - uint32_t new_ctrl, 
update = 0; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -464,14 +499,15 @@ nfp_net_disable_queues(struct rte_eth_dev *dev) nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0); new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE; - update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING | - NFP_NET_CFG_UPDATE_MSIX; + update = NFP_NET_CFG_UPDATE_GEN | + NFP_NET_CFG_UPDATE_RING | + NFP_NET_CFG_UPDATE_MSIX; - if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG) + if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0) new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG; /* If an error when reconfig we avoid to change hw state */ - if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + if (nfp_net_reconfig(hw, new_ctrl, update) != 0) return; hw->ctrl = new_ctrl; @@ -491,85 +527,86 @@ nfp_net_cfg_queue_setup(struct nfp_net_hw *hw) } void -nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac) +nfp_net_write_mac(struct nfp_net_hw *hw, + uint8_t *mac) { - uint32_t mac0 = *(uint32_t *)mac; + uint32_t mac0; uint16_t mac1; + mac0 = *(uint32_t *)mac; nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR); mac += 4; mac1 = *(uint16_t *)mac; nn_writew(rte_cpu_to_be_16(mac1), - hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6); + hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6); } int -nfp_net_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) +nfp_net_set_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr) { + uint32_t ctrl; + uint32_t update; struct nfp_net_hw *hw; - uint32_t update, ctrl; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && - !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) { - PMD_INIT_LOG(INFO, "MAC address unable to change when" - " port enabled"); + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 && + (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) { + PMD_DRV_LOG(ERR, "MAC address unable to change when port enabled"); return -EBUSY; } /* Writing new MAC to the specific port BAR address */ nfp_net_write_mac(hw, (uint8_t *)mac_addr); - /* Signal the NIC about the change */ update = NFP_NET_CFG_UPDATE_MACADDR; ctrl = hw->ctrl; - if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && - (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 && + (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) != 0) ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR; - if (nfp_net_reconfig(hw, ctrl, update) < 0) { - PMD_INIT_LOG(INFO, "MAC address update failed"); + + /* Signal the NIC about the change */ + if (nfp_net_reconfig(hw, ctrl, update) != 0) { + PMD_DRV_LOG(ERR, "MAC address update failed"); return -EIO; } + return 0; } int nfp_configure_rx_interrupt(struct rte_eth_dev *dev, - struct rte_intr_handle *intr_handle) + struct rte_intr_handle *intr_handle) { + uint16_t i; struct nfp_net_hw *hw; - int i; if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", - dev->data->nb_rx_queues)) { - PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" - " intr_vec", dev->data->nb_rx_queues); + dev->data->nb_rx_queues) != 0) { + PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues intr_vec", + dev->data->nb_rx_queues); return -ENOMEM; } hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) { - PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO"); - /* UIO just supports one queue and no LSC*/ + PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO"); + /* UIO just supports one queue and no LSC */ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0); - if (rte_intr_vec_list_index_set(intr_handle, 0, 0)) + if 
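For context on nfp_net_write_mac() above: the six address bytes are stored as one big-endian 32-bit word plus one big-endian 16-bit word. A self-contained sketch of the same packing, using explicit shifts instead of rte_cpu_to_be_32()/rte_cpu_to_be_16() so it stands alone (illustrative only):

#include <stdint.h>

/* Pack a 6-byte MAC into the two big-endian words written to the BAR:
 * bytes 0-3 in a 32-bit word, bytes 4-5 in a 16-bit word. */
static void
mac_to_words(const uint8_t mac[6], uint32_t *word0, uint16_t *word1)
{
	*word0 = ((uint32_t)mac[0] << 24) | ((uint32_t)mac[1] << 16) |
			((uint32_t)mac[2] << 8) | (uint32_t)mac[3];
	*word1 = (uint16_t)(((uint16_t)mac[4] << 8) | mac[5]);
}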
(rte_intr_vec_list_index_set(intr_handle, 0, 0) != 0) return -1; } else { - PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO"); + PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with VFIO"); for (i = 0; i < dev->data->nb_rx_queues; i++) { /* * The first msix vector is reserved for non - * efd interrupts - */ + * efd interrupts. + */ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1); - if (rte_intr_vec_list_index_set(intr_handle, i, - i + 1)) + if (rte_intr_vec_list_index_set(intr_handle, i, i + 1) != 0) return -1; - PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i, - rte_intr_vec_list_index_get(intr_handle, - i)); } } @@ -581,65 +618,65 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev, uint32_t nfp_check_offloads(struct rte_eth_dev *dev) { + uint32_t ctrl = 0; + uint64_t rx_offload; + uint64_t tx_offload; struct nfp_net_hw *hw; struct rte_eth_conf *dev_conf; - struct rte_eth_rxmode *rxmode; - struct rte_eth_txmode *txmode; - uint32_t ctrl = 0; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); dev_conf = &dev->data->dev_conf; - rxmode = &dev_conf->rxmode; - txmode = &dev_conf->txmode; + rx_offload = dev_conf->rxmode.offloads; + tx_offload = dev_conf->txmode.offloads; - if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) { - if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) + if ((rx_offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0) { + if ((hw->cap & NFP_NET_CFG_CTRL_RXCSUM) != 0) ctrl |= NFP_NET_CFG_CTRL_RXCSUM; } - if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) - nfp_net_enbable_rxvlan_cap(hw, &ctrl); + if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0) + nfp_net_enable_rxvlan_cap(hw, &ctrl); - if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) { - if (hw->cap & NFP_NET_CFG_CTRL_RXQINQ) + if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0) { + if ((hw->cap & NFP_NET_CFG_CTRL_RXQINQ) != 0) ctrl |= NFP_NET_CFG_CTRL_RXQINQ; } hw->mtu = dev->data->mtu; - if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) { - if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN_V2) + if ((tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) != 0) { + if ((hw->cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0) ctrl |= NFP_NET_CFG_CTRL_TXVLAN_V2; - else if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN) + else if ((hw->cap & NFP_NET_CFG_CTRL_TXVLAN) != 0) ctrl |= NFP_NET_CFG_CTRL_TXVLAN; } /* L2 broadcast */ - if (hw->cap & NFP_NET_CFG_CTRL_L2BC) + if ((hw->cap & NFP_NET_CFG_CTRL_L2BC) != 0) ctrl |= NFP_NET_CFG_CTRL_L2BC; /* L2 multicast */ - if (hw->cap & NFP_NET_CFG_CTRL_L2MC) + if ((hw->cap & NFP_NET_CFG_CTRL_L2MC) != 0) ctrl |= NFP_NET_CFG_CTRL_L2MC; /* TX checksum offload */ - if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM || - txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || - txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) + if ((tx_offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0 || + (tx_offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0 || + (tx_offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0) ctrl |= NFP_NET_CFG_CTRL_TXCSUM; /* LSO offload */ - if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO || - txmode->offloads & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) { - if (hw->cap & NFP_NET_CFG_CTRL_LSO) + if ((tx_offload & RTE_ETH_TX_OFFLOAD_TCP_TSO) != 0 || + (tx_offload & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) { + if ((hw->cap & NFP_NET_CFG_CTRL_LSO) != 0) ctrl |= NFP_NET_CFG_CTRL_LSO; else ctrl |= NFP_NET_CFG_CTRL_LSO2; } /* RX gather */ - if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) + if ((tx_offload & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0) ctrl |= NFP_NET_CFG_CTRL_GATHER; return ctrl; @@ -648,13 +685,12 @@ nfp_check_offloads(struct 
rte_eth_dev *dev) int nfp_net_promisc_enable(struct rte_eth_dev *dev) { - uint32_t new_ctrl, update = 0; - struct nfp_net_hw *hw; int ret; + uint32_t new_ctrl; + uint32_t update = 0; + struct nfp_net_hw *hw; struct nfp_flower_representor *repr; - PMD_DRV_LOG(DEBUG, "Promiscuous mode enable"); - if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) != 0) { repr = dev->data->dev_private; hw = repr->app_fw_flower->pf_hw; @@ -663,11 +699,11 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev) } if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) { - PMD_INIT_LOG(INFO, "Promiscuous mode not supported"); + PMD_DRV_LOG(ERR, "Promiscuous mode not supported"); return -ENOTSUP; } - if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) { + if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) != 0) { PMD_DRV_LOG(INFO, "Promiscuous mode already enabled"); return 0; } @@ -675,12 +711,8 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev) new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC; update = NFP_NET_CFG_UPDATE_GEN; - /* - * DPDK sets promiscuous mode on just after this call assuming - * it can not fail ... - */ ret = nfp_net_reconfig(hw, new_ctrl, update); - if (ret < 0) + if (ret != 0) return ret; hw->ctrl = new_ctrl; @@ -691,9 +723,10 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev) int nfp_net_promisc_disable(struct rte_eth_dev *dev) { - uint32_t new_ctrl, update = 0; - struct nfp_net_hw *hw; int ret; + uint32_t new_ctrl; + uint32_t update = 0; + struct nfp_net_hw *hw; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -705,12 +738,8 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev) new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC; update = NFP_NET_CFG_UPDATE_GEN; - /* - * DPDK sets promiscuous mode off just before this call - * assuming it can not fail ... - */ ret = nfp_net_reconfig(hw, new_ctrl, update); - if (ret < 0) + if (ret != 0) return ret; hw->ctrl = new_ctrl; @@ -719,32 +748,29 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev) } /* - * return 0 means link status changed, -1 means not changed + * Return 0 means link status changed, -1 means not changed * * Wait to complete is needed as it can take up to 9 seconds to get the Link * status. */ int -nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) +nfp_net_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete) { int ret; uint32_t i; - uint32_t nn_link_status; struct nfp_net_hw *hw; + uint32_t nn_link_status; struct rte_eth_link link; struct nfp_eth_table *nfp_eth_table; - - PMD_DRV_LOG(DEBUG, "Link update"); - hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - /* Read link status */ - nn_link_status = nn_cfg_readw(hw, NFP_NET_CFG_STS); - memset(&link, 0, sizeof(struct rte_eth_link)); - if (nn_link_status & NFP_NET_CFG_STS_LINK) + /* Read link status */ + nn_link_status = nn_cfg_readw(hw, NFP_NET_CFG_STS); + if ((nn_link_status & NFP_NET_CFG_STS_LINK) != 0) link.link_status = RTE_ETH_LINK_UP; link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; @@ -763,7 +789,7 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) } } } else { - /** + /* * Shift and mask nn_link_status so that it is effectively the value * at offset NFP_NET_CFG_STS_NSP_LINK_RATE. 
*/ @@ -776,13 +802,13 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) ret = rte_eth_linkstatus_set(dev, &link); if (ret == 0) { - if (link.link_status) + if (link.link_status != 0) PMD_DRV_LOG(INFO, "NIC Link is Up"); else PMD_DRV_LOG(INFO, "NIC Link is Down"); } - /** + /* * Notify the port to update the speed value in the CTRL BAR from NSP. * Not applicable for VFs as the associated PF is still attached to the * kernel driver. @@ -794,161 +820,144 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) } int -nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +nfp_net_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) { - int i; + uint16_t i; struct nfp_net_hw *hw; struct rte_eth_stats nfp_dev_stats; - hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (stats == NULL) + return -EINVAL; - /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */ + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats)); - /* reading per RX ring stats */ + /* Reading per RX ring stats */ for (i = 0; i < dev->data->nb_rx_queues; i++) { if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) break; nfp_dev_stats.q_ipackets[i] = - nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i)); - + nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i)); nfp_dev_stats.q_ipackets[i] -= - hw->eth_stats_base.q_ipackets[i]; + hw->eth_stats_base.q_ipackets[i]; nfp_dev_stats.q_ibytes[i] = - nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8); - + nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8); nfp_dev_stats.q_ibytes[i] -= - hw->eth_stats_base.q_ibytes[i]; + hw->eth_stats_base.q_ibytes[i]; } - /* reading per TX ring stats */ + /* Reading per TX ring stats */ for (i = 0; i < dev->data->nb_tx_queues; i++) { if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) break; nfp_dev_stats.q_opackets[i] = - nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i)); - - nfp_dev_stats.q_opackets[i] -= - hw->eth_stats_base.q_opackets[i]; + nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i)); + nfp_dev_stats.q_opackets[i] -= hw->eth_stats_base.q_opackets[i]; nfp_dev_stats.q_obytes[i] = - nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8); - - nfp_dev_stats.q_obytes[i] -= - hw->eth_stats_base.q_obytes[i]; + nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8); + nfp_dev_stats.q_obytes[i] -= hw->eth_stats_base.q_obytes[i]; } - nfp_dev_stats.ipackets = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES); - + nfp_dev_stats.ipackets = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES); nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets; - nfp_dev_stats.ibytes = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS); - + nfp_dev_stats.ibytes = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS); nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes; nfp_dev_stats.opackets = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES); - + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES); nfp_dev_stats.opackets -= hw->eth_stats_base.opackets; nfp_dev_stats.obytes = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS); - + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS); nfp_dev_stats.obytes -= hw->eth_stats_base.obytes; - /* reading general device stats */ + /* Reading general device stats */ nfp_dev_stats.ierrors = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS); - + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS); nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors; nfp_dev_stats.oerrors = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS); - + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS); nfp_dev_stats.oerrors -= 
hw->eth_stats_base.oerrors; /* RX ring mbuf allocation failures */ nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed; nfp_dev_stats.imissed = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS); - + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS); nfp_dev_stats.imissed -= hw->eth_stats_base.imissed; - if (stats) { - memcpy(stats, &nfp_dev_stats, sizeof(*stats)); - return 0; - } - return -EINVAL; + memcpy(stats, &nfp_dev_stats, sizeof(*stats)); + return 0; } +/* + * hw->eth_stats_base records the per counter starting point. + * Lets update it now. + */ int nfp_net_stats_reset(struct rte_eth_dev *dev) { - int i; + uint16_t i; struct nfp_net_hw *hw; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - /* - * hw->eth_stats_base records the per counter starting point. - * Lets update it now - */ - - /* reading per RX ring stats */ + /* Reading per RX ring stats */ for (i = 0; i < dev->data->nb_rx_queues; i++) { if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) break; hw->eth_stats_base.q_ipackets[i] = - nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i)); + nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i)); hw->eth_stats_base.q_ibytes[i] = - nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8); + nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8); } - /* reading per TX ring stats */ + /* Reading per TX ring stats */ for (i = 0; i < dev->data->nb_tx_queues; i++) { if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) break; hw->eth_stats_base.q_opackets[i] = - nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i)); + nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i)); hw->eth_stats_base.q_obytes[i] = - nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8); + nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8); } hw->eth_stats_base.ipackets = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES); + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES); hw->eth_stats_base.ibytes = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS); + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS); hw->eth_stats_base.opackets = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES); + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES); hw->eth_stats_base.obytes = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS); + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS); - /* reading general device stats */ + /* Reading general device stats */ hw->eth_stats_base.ierrors = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS); + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS); hw->eth_stats_base.oerrors = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS); + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS); /* RX ring mbuf allocation failures */ dev->data->rx_mbuf_alloc_failed = 0; hw->eth_stats_base.imissed = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS); + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS); return 0; } @@ -956,16 +965,18 @@ nfp_net_stats_reset(struct rte_eth_dev *dev) uint32_t nfp_net_xstats_size(const struct rte_eth_dev *dev) { - /* If the device is a VF, then there will be no MAC stats */ - struct nfp_net_hw *hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t count; + struct nfp_net_hw *hw; const uint32_t size = RTE_DIM(nfp_net_xstats); + /* If the device is a VF, then there will be no MAC stats */ + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac_stats == NULL) { - uint32_t count; for (count = 0; count < size; count++) { if (nfp_net_xstats[count].group == NFP_XSTAT_GROUP_MAC) break; } + return count; } @@ -1004,7 +1015,7 @@ nfp_net_xstats_value(const struct rte_eth_dev *dev, if (raw) return value; - /** + /* * A baseline value of each statistic counter is recorded when stats are 
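The stats paths above all follow the same resettable-counter idiom: the hardware counters are never cleared, so hw->eth_stats_base holds a snapshot taken at reset time and reads report current-minus-baseline. A tiny self-contained sketch of that idiom; counter_read is a hypothetical accessor standing in for the nn_cfg_readq() calls:

#include <stdint.h>

struct resettable_counter {
	uint64_t base;                   /* Snapshot taken at reset time */
	uint64_t (*counter_read)(void);  /* Hypothetical HW counter accessor */
};

static void
counter_reset(struct resettable_counter *c)
{
	c->base = c->counter_read();     /* New baseline, HW left untouched */
}

static uint64_t
counter_get(const struct resettable_counter *c)
{
	return c->counter_read() - c->base; /* Delta since the last reset */
}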
"reset". * Thus, the value returned by this function need to be decremented by this * baseline value. The result is the count of this statistic since the last time @@ -1013,12 +1024,12 @@ nfp_net_xstats_value(const struct rte_eth_dev *dev, return value - hw->eth_xstats_base[index].value; } +/* NOTE: All callers ensure dev is always set. */ int nfp_net_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, unsigned int size) { - /* NOTE: All callers ensure dev is always set. */ uint32_t id; uint32_t nfp_size; uint32_t read_size; @@ -1038,12 +1049,12 @@ nfp_net_xstats_get_names(struct rte_eth_dev *dev, return read_size; } +/* NOTE: All callers ensure dev is always set. */ int nfp_net_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int n) { - /* NOTE: All callers ensure dev is always set. */ uint32_t id; uint32_t nfp_size; uint32_t read_size; @@ -1064,16 +1075,16 @@ nfp_net_xstats_get(struct rte_eth_dev *dev, return read_size; } +/* + * NOTE: The only caller rte_eth_xstats_get_names_by_id() ensures dev, + * ids, xstats_names and size are valid, and non-NULL. + */ int nfp_net_xstats_get_names_by_id(struct rte_eth_dev *dev, const uint64_t *ids, struct rte_eth_xstat_name *xstats_names, unsigned int size) { - /** - * NOTE: The only caller rte_eth_xstats_get_names_by_id() ensures dev, - * ids, xstats_names and size are valid, and non-NULL. - */ uint32_t i; uint32_t read_size; @@ -1095,16 +1106,16 @@ nfp_net_xstats_get_names_by_id(struct rte_eth_dev *dev, return read_size; } +/* + * NOTE: The only caller rte_eth_xstats_get_by_id() ensures dev, + * ids, values and n are valid, and non-NULL. + */ int nfp_net_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, uint64_t *values, unsigned int n) { - /** - * NOTE: The only caller rte_eth_xstats_get_by_id() ensures dev, - * ids, values and n are valid, and non-NULL. - */ uint32_t i; uint32_t read_size; @@ -1139,37 +1150,21 @@ nfp_net_xstats_reset(struct rte_eth_dev *dev) hw->eth_xstats_base[id].id = id; hw->eth_xstats_base[id].value = nfp_net_xstats_value(dev, id, true); } - /** - * Successfully reset xstats, now call function to reset basic stats - * return value is then based on the success of that function - */ + + /* Successfully reset xstats, now call function to reset basic stats. 
*/ return nfp_net_stats_reset(dev); } -int +void nfp_net_rx_desc_limits(struct nfp_net_hw *hw, uint16_t *min_rx_desc, uint16_t *max_rx_desc) { - *max_rx_desc = NFP_NET_MAX_RX_DESC; - - switch (hw->device_id) { - case PCI_DEVICE_ID_NFP3800_PF_NIC: - case PCI_DEVICE_ID_NFP3800_VF_NIC: - *min_rx_desc = NFP3800_NET_MIN_RX_DESC; - return 0; - case PCI_DEVICE_ID_NFP4000_PF_NIC: - case PCI_DEVICE_ID_NFP6000_PF_NIC: - case PCI_DEVICE_ID_NFP6000_VF_NIC: - *min_rx_desc = NFP_NET_MIN_RX_DESC; - return 0; - default: - PMD_DRV_LOG(ERR, "Unknown NFP device id."); - return -EINVAL; - } + *max_rx_desc = hw->dev_info->max_qc_size; + *min_rx_desc = hw->dev_info->min_qc_size; } -int +void nfp_net_tx_desc_limits(struct nfp_net_hw *hw, uint16_t *min_tx_desc, uint16_t *max_tx_desc) @@ -1181,28 +1176,14 @@ nfp_net_tx_desc_limits(struct nfp_net_hw *hw, else tx_dpp = NFDK_TX_DESC_PER_SIMPLE_PKT; - *max_tx_desc = NFP_NET_MAX_TX_DESC / tx_dpp; - - switch (hw->device_id) { - case PCI_DEVICE_ID_NFP3800_PF_NIC: - case PCI_DEVICE_ID_NFP3800_VF_NIC: - *min_tx_desc = NFP3800_NET_MIN_TX_DESC / tx_dpp; - return 0; - case PCI_DEVICE_ID_NFP4000_PF_NIC: - case PCI_DEVICE_ID_NFP6000_PF_NIC: - case PCI_DEVICE_ID_NFP6000_VF_NIC: - *min_tx_desc = NFP_NET_MIN_TX_DESC / tx_dpp; - return 0; - default: - PMD_DRV_LOG(ERR, "Unknown NFP device id."); - return -EINVAL; - } + *max_tx_desc = hw->dev_info->max_qc_size / tx_dpp; + *min_tx_desc = hw->dev_info->min_qc_size / tx_dpp; } int nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { - int ret; + uint32_t cap_extend; uint16_t min_rx_desc; uint16_t max_rx_desc; uint16_t min_tx_desc; @@ -1211,18 +1192,14 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - ret = nfp_net_rx_desc_limits(hw, &min_rx_desc, &max_rx_desc); - if (ret != 0) - return ret; - - ret = nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc); - if (ret != 0) - return ret; + nfp_net_rx_desc_limits(hw, &min_rx_desc, &max_rx_desc); + nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc); dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues; dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU; - /* + + /** * The maximum rx packet length (max_rx_pktlen) is set to the * maximum supported frame size that the NFP can handle. 
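On the descriptor-limit rework just above: instead of switching on PCI device IDs, the limits now come straight from the per-ASIC dev_info, with the TX side scaled down by the number of descriptors a simple packet consumes on the active datapath (NFD3 vs NFDK). A sketch of the arithmetic with a stripped-down stand-in for struct nfp_dev_info (field names follow the patch, the struct itself is hypothetical):

#include <stdint.h>

struct qc_limits {
	uint32_t min_qc_size;
	uint32_t max_qc_size;
};

/* TX descriptor bounds: queue-controller size divided by the descriptors
 * needed per simple packet on the selected datapath. */
static void
tx_desc_limits(const struct qc_limits *info, uint16_t tx_dpp,
		uint16_t *min_tx, uint16_t *max_tx)
{
	*min_tx = (uint16_t)(info->min_qc_size / tx_dpp);
	*max_tx = (uint16_t)(info->max_qc_size / tx_dpp);
}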
This * includes layer 2 headers, CRC and other metadata that can @@ -1236,34 +1213,40 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) /* Next should change when PF support is implemented */ dev_info->max_mac_addrs = 1; - if (hw->cap & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) + if ((hw->cap & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) != 0) dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP; - if (hw->cap & NFP_NET_CFG_CTRL_RXQINQ) + if ((hw->cap & NFP_NET_CFG_CTRL_RXQINQ) != 0) dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; - if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) + if ((hw->cap & NFP_NET_CFG_CTRL_RXCSUM) != 0) dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | - RTE_ETH_RX_OFFLOAD_UDP_CKSUM | - RTE_ETH_RX_OFFLOAD_TCP_CKSUM; + RTE_ETH_RX_OFFLOAD_UDP_CKSUM | + RTE_ETH_RX_OFFLOAD_TCP_CKSUM; - if (hw->cap & (NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2)) + if ((hw->cap & (NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2)) != 0) dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT; - if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM) + if ((hw->cap & NFP_NET_CFG_CTRL_TXCSUM) != 0) dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | - RTE_ETH_TX_OFFLOAD_UDP_CKSUM | - RTE_ETH_TX_OFFLOAD_TCP_CKSUM; + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM; - if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) { + if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) != 0) { dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO; - if (hw->cap & NFP_NET_CFG_CTRL_VXLAN) + if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO; } - if (hw->cap & NFP_NET_CFG_CTRL_GATHER) + if ((hw->cap & NFP_NET_CFG_CTRL_GATHER) != 0) dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; + cap_extend = hw->cap_ext; + if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) { + dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY; + dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY; + } + dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { .pthresh = DEFAULT_RX_PTHRESH, @@ -1298,25 +1281,28 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) .nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG, }; - if (hw->cap & NFP_NET_CFG_CTRL_RSS_ANY) { + if ((hw->cap & NFP_NET_CFG_CTRL_RSS_ANY) != 0) { dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH; dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 | - RTE_ETH_RSS_NONFRAG_IPV4_TCP | - RTE_ETH_RSS_NONFRAG_IPV4_UDP | - RTE_ETH_RSS_NONFRAG_IPV4_SCTP | - RTE_ETH_RSS_IPV6 | - RTE_ETH_RSS_NONFRAG_IPV6_TCP | - RTE_ETH_RSS_NONFRAG_IPV6_UDP | - RTE_ETH_RSS_NONFRAG_IPV6_SCTP; + RTE_ETH_RSS_NONFRAG_IPV4_TCP | + RTE_ETH_RSS_NONFRAG_IPV4_UDP | + RTE_ETH_RSS_NONFRAG_IPV4_SCTP | + RTE_ETH_RSS_IPV6 | + RTE_ETH_RSS_NONFRAG_IPV6_TCP | + RTE_ETH_RSS_NONFRAG_IPV6_UDP | + RTE_ETH_RSS_NONFRAG_IPV6_SCTP; dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ; dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ; } - dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G | - RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G | - RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G; + dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | + RTE_ETH_LINK_SPEED_10G | + RTE_ETH_LINK_SPEED_25G | + RTE_ETH_LINK_SPEED_40G | + RTE_ETH_LINK_SPEED_50G | + RTE_ETH_LINK_SPEED_100G; return 0; } @@ -1349,12 +1335,13 @@ nfp_net_common_init(struct rte_pci_device *pci_dev, /* Get some of the read-only fields from the config BAR */ hw->cap = 
nn_cfg_readl(hw, NFP_NET_CFG_CAP); + hw->cap_ext = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1); hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); hw->flbufsz = DEFAULT_FLBUF_SIZE; nfp_net_init_metadata_format(hw); - /* read the Rx offset configured from firmware */ + /* Read the Rx offset configured from firmware */ if (hw->ver.major < 2) hw->rx_offset = NFP_NET_RX_OFFSET; else @@ -1371,7 +1358,6 @@ const uint32_t * nfp_net_supported_ptypes_get(struct rte_eth_dev *dev) { static const uint32_t ptypes[] = { - /* refers to nfp_net_set_hash() */ RTE_PTYPE_INNER_L3_IPV4, RTE_PTYPE_INNER_L3_IPV6, RTE_PTYPE_INNER_L3_IPV6_EXT, @@ -1381,75 +1367,74 @@ nfp_net_supported_ptypes_get(struct rte_eth_dev *dev) if (dev->rx_pkt_burst == nfp_net_recv_pkts) return ptypes; + return NULL; } int -nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id) { - struct rte_pci_device *pci_dev; + uint16_t base = 0; struct nfp_net_hw *hw; - int base = 0; + struct rte_pci_device *pci_dev; - hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); pci_dev = RTE_ETH_DEV_TO_PCI(dev); - - if (rte_intr_type_get(pci_dev->intr_handle) != - RTE_INTR_HANDLE_UIO) + if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO) base = 1; /* Make sure all updates are written before un-masking */ rte_wmb(); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), - NFP_NET_CFG_ICR_UNMASKED); + NFP_NET_CFG_ICR_UNMASKED); return 0; } int -nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id) { - struct rte_pci_device *pci_dev; + uint16_t base = 0; struct nfp_net_hw *hw; - int base = 0; + struct rte_pci_device *pci_dev; - hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); pci_dev = RTE_ETH_DEV_TO_PCI(dev); - - if (rte_intr_type_get(pci_dev->intr_handle) != - RTE_INTR_HANDLE_UIO) + if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO) base = 1; /* Make sure all updates are written before un-masking */ rte_wmb(); - nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), NFP_NET_CFG_ICR_RXTX); + return 0; } static void nfp_net_dev_link_status_print(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_eth_link link; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); rte_eth_linkstatus_get(dev, &link); - if (link.link_status) + if (link.link_status != 0) PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", - dev->data->port_id, link.link_speed, - link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX - ? "full-duplex" : "half-duplex"); + dev->data->port_id, link.link_speed, + link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ? + "full-duplex" : "half-duplex"); else - PMD_DRV_LOG(INFO, " Port %d: Link Down", - dev->data->port_id); + PMD_DRV_LOG(INFO, " Port %d: Link Down", dev->data->port_id); PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT, - pci_dev->addr.domain, pci_dev->addr.bus, - pci_dev->addr.devid, pci_dev->addr.function); + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function); } -/* Interrupt configuration and handling */ - /* - * nfp_net_irq_unmask - Unmask an interrupt + * Unmask an interrupt * * If MSI-X auto-masking is enabled clear the mask bit, otherwise * clear the ICR for the entry. 
@@ -1463,28 +1448,26 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev) hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); pci_dev = RTE_ETH_DEV_TO_PCI(dev); - if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) { + /* Make sure all updates are written before un-masking */ + rte_wmb(); + + if ((hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) != 0) { /* If MSI-X auto-masking is used, clear the entry */ - rte_wmb(); rte_intr_ack(pci_dev->intr_handle); } else { - /* Make sure all updates are written before un-masking */ - rte_wmb(); nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX), - NFP_NET_CFG_ICR_UNMASKED); + NFP_NET_CFG_ICR_UNMASKED); } } -/* +/** * Interrupt handler which shall be registered for alarm callback for delayed * handling specific interrupt to wait for the stable nic state. As the NIC * interrupt state is not stable for nfp after link is just down, it needs * to wait 4 seconds to get the stable status. * - * @param handle Pointer to interrupt handle. - * @param param The address of parameter (struct rte_eth_dev *) - * - * @return void + * @param param + * The address of parameter (struct rte_eth_dev *) */ void nfp_net_dev_interrupt_delayed_handler(void *param) @@ -1513,19 +1496,18 @@ nfp_net_dev_interrupt_handler(void *param) nfp_net_link_update(dev, 0); - /* likely to up */ + /* Likely to up */ if (link.link_status == 0) { - /* handle it 1 sec later, wait it being stable */ + /* Handle it 1 sec later, wait it being stable */ timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT; - /* likely to down */ - } else { - /* handle it 4 sec later, wait it being stable */ + } else { /* Likely to down */ + /* Handle it 4 sec later, wait it being stable */ timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT; } if (rte_eal_alarm_set(timeout * 1000, - nfp_net_dev_interrupt_delayed_handler, - (void *)dev) < 0) { + nfp_net_dev_interrupt_delayed_handler, + (void *)dev) != 0) { PMD_INIT_LOG(ERR, "Error setting alarm"); /* Unmasking */ nfp_net_irq_unmask(dev); @@ -1533,27 +1515,28 @@ nfp_net_dev_interrupt_handler(void *param) } int -nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +nfp_net_dev_mtu_set(struct rte_eth_dev *dev, + uint16_t mtu) { struct nfp_net_hw *hw; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - /* mtu setting is forbidden if port is started */ + /* MTU setting is forbidden if port is started */ if (dev->data->dev_started) { PMD_DRV_LOG(ERR, "port %d must be stopped before configuration", - dev->data->port_id); + dev->data->port_id); return -EBUSY; } /* MTU larger than current mbufsize not supported */ if (mtu > hw->flbufsz) { PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported", - mtu, hw->flbufsz); + mtu, hw->flbufsz); return -ERANGE; } - /* writing to configuration space */ + /* Writing to configuration space */ nn_cfg_writel(hw, NFP_NET_CFG_MTU, mtu); hw->mtu = mtu; @@ -1562,32 +1545,32 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) } int -nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask) +nfp_net_vlan_offload_set(struct rte_eth_dev *dev, + int mask) { - uint32_t new_ctrl, update; - struct nfp_net_hw *hw; - struct rte_eth_conf *dev_conf; - uint32_t rxvlan_ctrl; int ret; + uint32_t update; + uint32_t new_ctrl; + uint64_t rx_offload; + struct nfp_net_hw *hw; + uint32_t rxvlan_ctrl = 0; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_conf = &dev->data->dev_conf; + rx_offload = dev->data->dev_conf.rxmode.offloads; new_ctrl = hw->ctrl; - rxvlan_ctrl = 0; - - nfp_net_enbable_rxvlan_cap(hw, &rxvlan_ctrl); /* VLAN stripping setting */ 
- if (mask & RTE_ETH_VLAN_STRIP_MASK) { - if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) + if ((mask & RTE_ETH_VLAN_STRIP_MASK) != 0) { + nfp_net_enable_rxvlan_cap(hw, &rxvlan_ctrl); + if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0) new_ctrl |= rxvlan_ctrl; else new_ctrl &= ~rxvlan_ctrl; } /* QinQ stripping setting */ - if (mask & RTE_ETH_QINQ_STRIP_MASK) { - if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) + if ((mask & RTE_ETH_QINQ_STRIP_MASK) != 0) { + if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0) new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ; else new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ; @@ -1599,44 +1582,50 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask) update = NFP_NET_CFG_UPDATE_GEN; ret = nfp_net_reconfig(hw, new_ctrl, update); - if (ret == 0) - hw->ctrl = new_ctrl; + if (ret != 0) + return ret; - return ret; + hw->ctrl = new_ctrl; + + return 0; } static int nfp_net_rss_reta_write(struct rte_eth_dev *dev, - struct rte_eth_rss_reta_entry64 *reta_conf, - uint16_t reta_size) + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) { - uint32_t reta, mask; - int i, j; - int idx, shift; - struct nfp_net_hw *hw = - NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t i; + uint16_t j; + uint16_t idx; + uint8_t mask; + uint32_t reta; + uint16_t shift; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { - PMD_DRV_LOG(ERR, "The size of hash lookup table configured " - "(%d) doesn't match the number hardware can supported " - "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); + PMD_DRV_LOG(ERR, "The size of hash lookup table configured (%hu)" + " doesn't match hardware can supported (%d)", + reta_size, NFP_NET_CFG_RSS_ITBL_SZ); return -EINVAL; } /* * Update Redirection Table. There are 128 8bit-entries which can be - * manage as 32 32bit-entries + * manage as 32 32bit-entries. 
*/ for (i = 0; i < reta_size; i += 4) { /* Handling 4 RSS entries per loop */ idx = i / RTE_ETH_RETA_GROUP_SIZE; shift = i % RTE_ETH_RETA_GROUP_SIZE; mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF); - if (mask == 0) continue; reta = 0; + /* If all 4 entries were set, don't need read RETA register */ if (mask != 0xF) reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i); @@ -1644,28 +1633,31 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev, for (j = 0; j < 4; j++) { if ((mask & (0x1 << j)) == 0) continue; + + /* Clearing the entry bits */ if (mask != 0xF) - /* Clearing the entry bits */ reta &= ~(0xFF << (8 * j)); + reta |= reta_conf[idx].reta[shift + j] << (8 * j); } - nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, - reta); + + nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, reta); } + return 0; } /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */ int nfp_net_reta_update(struct rte_eth_dev *dev, - struct rte_eth_rss_reta_entry64 *reta_conf, - uint16_t reta_size) + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) { - struct nfp_net_hw *hw = - NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t update; int ret; + uint32_t update; + struct nfp_net_hw *hw; + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0) return -EINVAL; @@ -1675,73 +1667,76 @@ nfp_net_reta_update(struct rte_eth_dev *dev, update = NFP_NET_CFG_UPDATE_RSS; - if (nfp_net_reconfig(hw, hw->ctrl, update) < 0) + if (nfp_net_reconfig(hw, hw->ctrl, update) != 0) return -EIO; return 0; } - /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */ +/* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */ int nfp_net_reta_query(struct rte_eth_dev *dev, - struct rte_eth_rss_reta_entry64 *reta_conf, - uint16_t reta_size) + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) { - uint8_t i, j, mask; - int idx, shift; + uint16_t i; + uint16_t j; + uint16_t idx; + uint8_t mask; uint32_t reta; + uint16_t shift; struct nfp_net_hw *hw; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0) return -EINVAL; if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { - PMD_DRV_LOG(ERR, "The size of hash lookup table configured " - "(%d) doesn't match the number hardware can supported " - "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); + PMD_DRV_LOG(ERR, "The size of hash lookup table configured (%d)" + " doesn't match hardware can supported (%d)", + reta_size, NFP_NET_CFG_RSS_ITBL_SZ); return -EINVAL; } /* * Reading Redirection Table. There are 128 8bit-entries which can be - * manage as 32 32bit-entries + * manage as 32 32bit-entries. 
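Since the indirection table is 128 one-byte entries exposed through 32-bit registers, each register update handles four entries at a time and a 4-bit mask selects which bytes are replaced; unmasked bytes keep the value read back from the register. A self-contained sketch of just that merge step (no register access):

#include <stdint.h>

/* Merge up to four 8-bit RETA entries into one 32-bit register value.
 * 'mask' carries one bit per entry; old_reg supplies the untouched bytes. */
static uint32_t
reta_pack4(uint32_t old_reg, const uint8_t entries[4], uint8_t mask)
{
	uint32_t reg = old_reg;
	int j;

	for (j = 0; j < 4; j++) {
		if ((mask & (1u << j)) == 0)
			continue;
		reg &= ~(0xFFu << (8 * j));             /* Clear the old byte */
		reg |= (uint32_t)entries[j] << (8 * j); /* Insert the new entry */
	}

	return reg;
}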
*/ for (i = 0; i < reta_size; i += 4) { /* Handling 4 RSS entries per loop */ idx = i / RTE_ETH_RETA_GROUP_SIZE; shift = i % RTE_ETH_RETA_GROUP_SIZE; - mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF); + mask = (reta_conf[idx].mask >> shift) & 0xF; if (mask == 0) continue; - reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + - shift); + reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift); for (j = 0; j < 4; j++) { if ((mask & (0x1 << j)) == 0) continue; + reta_conf[idx].reta[shift + j] = - (uint8_t)((reta >> (8 * j)) & 0xF); + (uint8_t)((reta >> (8 * j)) & 0xF); } } + return 0; } static int nfp_net_rss_hash_write(struct rte_eth_dev *dev, - struct rte_eth_rss_conf *rss_conf) + struct rte_eth_rss_conf *rss_conf) { - struct nfp_net_hw *hw; + uint8_t i; + uint8_t key; uint64_t rss_hf; + struct nfp_net_hw *hw; uint32_t cfg_rss_ctrl = 0; - uint8_t key; - int i; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - /* Writing the key byte a byte */ + /* Writing the key byte by byte */ for (i = 0; i < rss_conf->rss_key_len; i++) { memcpy(&key, &rss_conf->rss_key[i], 1); nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key); @@ -1749,34 +1744,34 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev, rss_hf = rss_conf->rss_hf; - if (rss_hf & RTE_ETH_RSS_IPV4) + if ((rss_hf & RTE_ETH_RSS_IPV4) != 0) cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4; - if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) + if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0) cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP; - if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) + if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0) cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP; - if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) + if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) != 0) cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_SCTP; - if (rss_hf & RTE_ETH_RSS_IPV6) + if ((rss_hf & RTE_ETH_RSS_IPV6) != 0) cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6; - if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) + if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0) cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP; - if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) + if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0) cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP; - if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) + if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) != 0) cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_SCTP; cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK; cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ; - /* configuring where to apply the RSS hash */ + /* Configuring where to apply the RSS hash */ nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl); /* Writing the key size */ @@ -1787,7 +1782,7 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev, int nfp_net_rss_hash_update(struct rte_eth_dev *dev, - struct rte_eth_rss_conf *rss_conf) + struct rte_eth_rss_conf *rss_conf) { uint32_t update; uint64_t rss_hf; @@ -1799,15 +1794,16 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev, /* Checking if RSS is enabled */ if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0) { - if (rss_hf != 0) { /* Enable RSS? 
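The hash-write path above is a one-to-one translation of requested RTE_ETH_RSS_* bits into NFP_NET_CFG_RSS_* control bits. Purely as an illustration, the same mapping in table-driven form (an alternative formulation, not the patch's code; the flag values are left to the caller):

#include <stdint.h>

struct rss_flag_map {
	uint64_t rte_flag;   /* Requested RTE_ETH_RSS_* bit */
	uint32_t nfp_flag;   /* Matching NFP_NET_CFG_RSS_* bit */
};

/* Translate a hash-function request mask into control-register bits. */
static uint32_t
rss_flags_to_ctrl(uint64_t rss_hf, const struct rss_flag_map *map, int n)
{
	uint32_t ctrl = 0;
	int i;

	for (i = 0; i < n; i++) {
		if ((rss_hf & map[i].rte_flag) != 0)
			ctrl |= map[i].nfp_flag;
	}

	return ctrl;
}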
*/ + if (rss_hf != 0) { PMD_DRV_LOG(ERR, "RSS unsupported"); return -EINVAL; } + return 0; /* Nothing to do */ } if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) { - PMD_DRV_LOG(ERR, "hash key too long"); + PMD_DRV_LOG(ERR, "RSS hash key too long"); return -EINVAL; } @@ -1815,7 +1811,7 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev, update = NFP_NET_CFG_UPDATE_RSS; - if (nfp_net_reconfig(hw, hw->ctrl, update) < 0) + if (nfp_net_reconfig(hw, hw->ctrl, update) != 0) return -EIO; return 0; @@ -1823,12 +1819,12 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev, int nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, - struct rte_eth_rss_conf *rss_conf) + struct rte_eth_rss_conf *rss_conf) { + uint8_t i; + uint8_t key; uint64_t rss_hf; uint32_t cfg_rss_ctrl; - uint8_t key; - int i; struct nfp_net_hw *hw; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1839,28 +1835,28 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, rss_hf = rss_conf->rss_hf; cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL); - if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) + if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) != 0) rss_hf |= RTE_ETH_RSS_IPV4; - if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) + if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) != 0) rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP; - if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP) + if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP) != 0) rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP; - if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP) + if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP) != 0) rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP; - if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP) + if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP) != 0) rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP; - if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) + if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) != 0) rss_hf |= RTE_ETH_RSS_IPV6; - if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_SCTP) + if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_SCTP) != 0) rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_SCTP; - if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_SCTP) + if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_SCTP) != 0) rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_SCTP; /* Propagate current RSS hash functions to caller */ @@ -1881,20 +1877,18 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, int nfp_net_rss_config_default(struct rte_eth_dev *dev) { + int ret; + uint8_t i; + uint8_t j; + uint16_t queue = 0; struct rte_eth_conf *dev_conf; struct rte_eth_rss_conf rss_conf; - struct rte_eth_rss_reta_entry64 nfp_reta_conf[2]; uint16_t rx_queues = dev->data->nb_rx_queues; - uint16_t queue; - int i, j, ret; - - PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues", - rx_queues); + struct rte_eth_rss_reta_entry64 nfp_reta_conf[2]; nfp_reta_conf[0].mask = ~0x0; nfp_reta_conf[1].mask = ~0x0; - queue = 0; for (i = 0; i < 0x40; i += 8) { for (j = i; j < (i + 8); j++) { nfp_reta_conf[0].reta[j] = queue; @@ -1902,17 +1896,18 @@ nfp_net_rss_config_default(struct rte_eth_dev *dev) queue %= rx_queues; } } + ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80); if (ret != 0) return ret; dev_conf = &dev->data->dev_conf; if (dev_conf == NULL) { - PMD_DRV_LOG(INFO, "wrong rss conf"); + PMD_DRV_LOG(ERR, "Wrong rss conf"); return -EINVAL; } - rss_conf = dev_conf->rx_adv_conf.rss_conf; + rss_conf = dev_conf->rx_adv_conf.rss_conf; ret = nfp_net_rss_hash_write(dev, &rss_conf); return ret; @@ -1927,6 +1922,7 @@ nfp_net_stop_rx_queue(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_rx_queues; i++) { this_rx_q = dev->data->rx_queues[i]; nfp_net_reset_rx_queue(this_rx_q); + 
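nfp_net_rss_config_default() above seeds the whole indirection table by walking the RX queues round-robin (queue incremented modulo nb_rx_queues). The same fill as a self-contained sketch (assumes nb_rx_queues is non-zero):

#include <stdint.h>

/* Spread indirection-table entries round-robin across the RX queues,
 * as the default RSS configuration does. */
static void
reta_fill_round_robin(uint8_t *reta, int reta_size, uint16_t nb_rx_queues)
{
	uint16_t queue = 0;
	int i;

	for (i = 0; i < reta_size; i++) {
		reta[i] = (uint8_t)queue;
		queue = (uint16_t)((queue + 1) % nb_rx_queues);
	}
}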
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } } @@ -1952,6 +1948,7 @@ nfp_net_stop_tx_queue(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { this_tx_q = dev->data->tx_queues[i]; nfp_net_reset_tx_queue(this_tx_q); + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } } @@ -1985,7 +1982,7 @@ nfp_net_set_vxlan_port(struct nfp_net_hw *hw, for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) { nn_cfg_writel(hw, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port), - (hw->vxlan_ports[i + 1] << 16) | hw->vxlan_ports[i]); + (hw->vxlan_ports[i + 1] << 16) | hw->vxlan_ports[i]); } rte_spinlock_lock(&hw->reconfig_lock); @@ -2002,10 +1999,11 @@ nfp_net_set_vxlan_port(struct nfp_net_hw *hw, /* * The firmware with NFD3 can not handle DMA address requiring more - * than 40 bits + * than 40 bits. */ int -nfp_net_check_dma_mask(struct nfp_net_hw *hw, char *name) +nfp_net_check_dma_mask(struct nfp_net_hw *hw, + char *name) { if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3 && rte_mem_check_dma_mask(40) != 0) { @@ -2053,7 +2051,8 @@ nfp_net_cfg_read_version(struct nfp_net_hw *hw) } static void -nfp_net_get_nsp_info(struct nfp_net_hw *hw, char *nsp_version) +nfp_net_get_nsp_info(struct nfp_net_hw *hw, + char *nsp_version) { struct nfp_nsp *nsp; @@ -2069,7 +2068,8 @@ nfp_net_get_nsp_info(struct nfp_net_hw *hw, char *nsp_version) } static void -nfp_net_get_mip_name(struct nfp_net_hw *hw, char *mip_name) +nfp_net_get_mip_name(struct nfp_net_hw *hw, + char *mip_name) { struct nfp_mip *mip; @@ -2083,7 +2083,8 @@ nfp_net_get_mip_name(struct nfp_net_hw *hw, char *mip_name) } static void -nfp_net_get_app_name(struct nfp_net_hw *hw, char *app_name) +nfp_net_get_app_name(struct nfp_net_hw *hw, + char *app_name) { switch (hw->pf_dev->app_fw_id) { case NFP_APP_FW_CORE_NIC: diff --git a/drivers/net/nfp/nfp_common.h b/drivers/net/nfp/nfp_common.h index 1ce51d44d4c..c7f467e33cd 100644 --- a/drivers/net/nfp/nfp_common.h +++ b/drivers/net/nfp/nfp_common.h @@ -3,52 +3,25 @@ * All rights reserved. 
*/ -#ifndef _NFP_COMMON_H_ -#define _NFP_COMMON_H_ +#ifndef __NFP_COMMON_H__ +#define __NFP_COMMON_H__ -#include "nfp_ctrl.h" - -#define NFP_NET_PMD_VERSION "0.1" -#define PCI_VENDOR_ID_NETRONOME 0x19ee -#define PCI_VENDOR_ID_CORIGINE 0x1da8 - -#define PCI_DEVICE_ID_NFP3800_PF_NIC 0x3800 -#define PCI_DEVICE_ID_NFP3800_VF_NIC 0x3803 -#define PCI_DEVICE_ID_NFP4000_PF_NIC 0x4000 -#define PCI_DEVICE_ID_NFP6000_PF_NIC 0x6000 -#define PCI_DEVICE_ID_NFP6000_VF_NIC 0x6003 /* Include NFP4000VF */ - -/* Forward declaration */ -struct nfp_net_adapter; - -#define NFP_TX_MAX_SEG UINT8_MAX -#define NFP_TX_MAX_MTU_SEG 8 +#include +#include +#include +#include -/* Bar allocation */ -#define NFP_NET_CRTL_BAR 0 -#define NFP_NET_TX_BAR 2 -#define NFP_NET_RX_BAR 2 -#define NFP_QCP_QUEUE_AREA_SZ 0x80000 +#include "nfp_ctrl.h" +#include "nfpcore/nfp_dev.h" /* Macros for accessing the Queue Controller Peripheral 'CSRs' */ #define NFP_QCP_QUEUE_OFF(_x) ((_x) * 0x800) #define NFP_QCP_QUEUE_ADD_RPTR 0x0000 #define NFP_QCP_QUEUE_ADD_WPTR 0x0004 #define NFP_QCP_QUEUE_STS_LO 0x0008 -#define NFP_QCP_QUEUE_STS_LO_READPTR_mask (0x3ffff) +#define NFP_QCP_QUEUE_STS_LO_READPTR_MASK (0x3ffff) #define NFP_QCP_QUEUE_STS_HI 0x000c -#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask (0x3ffff) - -#define NFP_PCIE_QCP_NFP3800_OFFSET 0x400000 -#define NFP_PCIE_QCP_NFP6000_OFFSET 0x80000 -#define NFP_PCIE_QUEUE_NFP3800_MASK 0x1ff -#define NFP_PCIE_QUEUE_NFP6000_MASK 0xff -#define NFP_PCIE_QCP_PF_OFFSET 0x0 -#define NFP_PCIE_QCP_VF_OFFSET 0x0 - -/* The offset of the queue controller queues in the PCIe Target */ -#define NFP_PCIE_QUEUE(_offset, _q, _mask) \ - ((_offset) + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & (_mask)))) +#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_MASK (0x3ffff) /* Interrupt definitions */ #define NFP_NET_IRQ_LSC_IDX 0 @@ -59,7 +32,7 @@ struct nfp_net_adapter; #define DEFAULT_RX_HTHRESH 8 #define DEFAULT_RX_WTHRESH 0 -#define DEFAULT_TX_RS_THRESH 32 +#define DEFAULT_TX_RS_THRESH 32 #define DEFAULT_TX_FREE_THRESH 32 #define DEFAULT_TX_PTHRESH 32 #define DEFAULT_TX_HTHRESH 0 @@ -67,28 +40,12 @@ struct nfp_net_adapter; #define DEFAULT_TX_RSBIT_THRESH 32 /* Alignment for dma zones */ -#define NFP_MEMZONE_ALIGN 128 - -/* - * This is used by the reconfig protocol. It sets the maximum time waiting in - * milliseconds before a reconfig timeout happens. 
- */ -#define NFP_NET_POLL_TIMEOUT 5000 +#define NFP_MEMZONE_ALIGN 128 #define NFP_QCP_QUEUE_ADDR_SZ (0x800) -#define NFP_NET_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ -#define NFP_NET_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ - /* Number of supported physical ports */ -#define NFP_MAX_PHYPORTS 12 - -/* Maximum supported NFP frame size (MTU + layer 2 headers) */ -#define NFP_FRAME_SIZE_MAX 10048 -#define DEFAULT_FLBUF_SIZE 9216 - -#include -#include +#define NFP_MAX_PHYPORTS 12 /* Firmware application ID's */ enum nfp_app_fw_id { @@ -96,7 +53,7 @@ enum nfp_app_fw_id { NFP_APP_FW_FLOWER_NIC = 0x3, }; -/* nfp_qcp_ptr - Read or Write Pointer of a queue */ +/* Read or Write Pointer of a queue */ enum nfp_qcp_ptr { NFP_QCP_READ_PTR = 0, NFP_QCP_WRITE_PTR @@ -107,40 +64,47 @@ enum nfp_net_meta_format { NFP_NET_METAFORMAT_CHAINED, }; +/* Parsed control BAR TLV capabilities */ +struct nfp_net_tlv_caps { + uint32_t mbox_off; /**< VNIC mailbox area offset */ + uint32_t mbox_len; /**< VNIC mailbox area length */ + uint32_t mbox_cmsg_types; /**< Cmsgs which can be passed through the mailbox */ +}; + struct nfp_pf_dev { - /* Backpointer to associated pci device */ + /** Backpointer to associated pci device */ struct rte_pci_device *pci_dev; enum nfp_app_fw_id app_fw_id; - /* Pointer to the app running on the PF */ + /** Pointer to the app running on the PF */ void *app_fw_priv; - /* The eth table reported by firmware */ + /** The eth table reported by firmware */ struct nfp_eth_table *nfp_eth_table; uint8_t *ctrl_bar; struct nfp_cpp *cpp; struct nfp_cpp_area *ctrl_area; - struct nfp_cpp_area *hwqueues_area; + struct nfp_cpp_area *qc_area; - uint8_t *hw_queues; + uint8_t *qc_bar; struct nfp_hwinfo *hwinfo; struct nfp_rtsym_table *sym_tbl; - /* service id of cpp bridge service */ + /** Service id of cpp bridge service */ uint32_t cpp_bridge_id; }; struct nfp_app_fw_nic { - /* Backpointer to the PF device */ + /** Backpointer to the PF device */ struct nfp_pf_dev *pf_dev; - /* - * Array of physical ports belonging to the this CoreNIC app - * This is really a list of vNIC's. One for each physical port + /** + * Array of physical ports belonging to this CoreNIC app. + * This is really a list of vNIC's, one for each physical port. 
*/ struct nfp_net_hw *ports[NFP_MAX_PHYPORTS]; @@ -149,13 +113,14 @@ struct nfp_app_fw_nic { }; struct nfp_net_hw { - /* Backpointer to the PF this port belongs to */ + /** Backpointer to the PF this port belongs to */ struct nfp_pf_dev *pf_dev; - /* Backpointer to the eth_dev of this port*/ + /** Backpointer to the eth_dev of this port */ struct rte_eth_dev *eth_dev; - /* Info from the firmware */ + /** Info from the firmware */ + uint32_t cap_ext; struct nfp_net_fw_ver ver; uint32_t cap; uint32_t max_mtu; @@ -163,7 +128,10 @@ struct nfp_net_hw { uint32_t rx_offset; enum nfp_net_meta_format meta_format; - /* Current values for control */ + /** NFP ASIC params */ + const struct nfp_dev_info *dev_info; + + /** Current values for control */ uint32_t ctrl; uint8_t *ctrl_bar; @@ -189,7 +157,7 @@ struct nfp_net_hw { struct rte_ether_addr mac_addr; - /* Records starting point for counters */ + /** Records starting point for counters */ struct rte_eth_stats eth_stats_base; struct rte_eth_xstat *eth_xstats_base; @@ -199,50 +167,65 @@ struct nfp_net_hw { uint8_t *mac_stats_bar; uint8_t *mac_stats; - /* Sequential physical port number, only valid for CoreNIC firmware */ + /** Sequential physical port number, only valid for CoreNIC firmware */ uint8_t idx; - /* Internal port number as seen from NFP */ + /** Internal port number as seen from NFP */ uint8_t nfp_idx; + + struct nfp_net_tlv_caps tlv_caps; + + struct nfp_net_ipsec_data *ipsec_data; }; struct nfp_net_adapter { struct nfp_net_hw hw; }; -static inline uint8_t nn_readb(volatile const void *addr) +static inline uint8_t +nn_readb(volatile const void *addr) { return rte_read8(addr); } -static inline void nn_writeb(uint8_t val, volatile void *addr) +static inline void +nn_writeb(uint8_t val, + volatile void *addr) { rte_write8(val, addr); } -static inline uint32_t nn_readl(volatile const void *addr) +static inline uint32_t +nn_readl(volatile const void *addr) { return rte_read32(addr); } -static inline void nn_writel(uint32_t val, volatile void *addr) +static inline void +nn_writel(uint32_t val, + volatile void *addr) { rte_write32(val, addr); } -static inline uint16_t nn_readw(volatile const void *addr) +static inline uint16_t +nn_readw(volatile const void *addr) { return rte_read16(addr); } -static inline void nn_writew(uint16_t val, volatile void *addr) +static inline void +nn_writew(uint16_t val, + volatile void *addr) { rte_write16(val, addr); } -static inline uint64_t nn_readq(volatile void *addr) +static inline uint64_t +nn_readq(volatile void *addr) { + uint32_t low; + uint32_t high; const volatile uint32_t *p = addr; - uint32_t low, high; high = nn_readl((volatile const void *)(p + 1)); low = nn_readl((volatile const void *)p); @@ -250,72 +233,88 @@ static inline uint64_t nn_readq(volatile void *addr) return low + ((uint64_t)high << 32); } -static inline void nn_writeq(uint64_t val, volatile void *addr) +static inline void +nn_writeq(uint64_t val, + volatile void *addr) { nn_writel(val >> 32, (volatile char *)addr + 4); nn_writel(val, addr); } -/* - * Functions to read/write from/to Config BAR - * Performs any endian conversion necessary. 
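nn_readq()/nn_writeq() above emulate a 64-bit BAR access with two 32-bit accesses, high word at byte offset +4. The split/combine arithmetic on its own, as a self-contained sketch:

#include <stdint.h>

/* Combine two 32-bit halves into a 64-bit value and split one back,
 * mirroring the nn_readq()/nn_writeq() pattern. */
static uint64_t
combine64(uint32_t low, uint32_t high)
{
	return (uint64_t)low | ((uint64_t)high << 32);
}

static void
split64(uint64_t val, uint32_t *low, uint32_t *high)
{
	*low = (uint32_t)val;
	*high = (uint32_t)(val >> 32);
}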
- */ static inline uint8_t -nn_cfg_readb(struct nfp_net_hw *hw, int off) +nn_cfg_readb(struct nfp_net_hw *hw, + uint32_t off) { return nn_readb(hw->ctrl_bar + off); } static inline void -nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val) +nn_cfg_writeb(struct nfp_net_hw *hw, + uint32_t off, + uint8_t val) { nn_writeb(val, hw->ctrl_bar + off); } static inline uint16_t -nn_cfg_readw(struct nfp_net_hw *hw, int off) +nn_cfg_readw(struct nfp_net_hw *hw, + uint32_t off) { return rte_le_to_cpu_16(nn_readw(hw->ctrl_bar + off)); } static inline void -nn_cfg_writew(struct nfp_net_hw *hw, int off, uint16_t val) +nn_cfg_writew(struct nfp_net_hw *hw, + uint32_t off, + uint16_t val) { nn_writew(rte_cpu_to_le_16(val), hw->ctrl_bar + off); } static inline uint32_t -nn_cfg_readl(struct nfp_net_hw *hw, int off) +nn_cfg_readl(struct nfp_net_hw *hw, + uint32_t off) { return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off)); } static inline void -nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val) +nn_cfg_writel(struct nfp_net_hw *hw, + uint32_t off, + uint32_t val) { nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off); } static inline uint64_t -nn_cfg_readq(struct nfp_net_hw *hw, int off) +nn_cfg_readq(struct nfp_net_hw *hw, + uint32_t off) { return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off)); } static inline void -nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val) +nn_cfg_writeq(struct nfp_net_hw *hw, + uint32_t off, + uint64_t val) { nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off); } -/* - * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue - * @q: Base address for queue structure - * @ptr: Add to the Read or Write pointer - * @val: Value to add to the queue pointer +/** + * Add the value to the selected pointer of a queue. + * + * @param queue + * Base address for queue structure + * @param ptr + * Add to the read or write pointer + * @param val + * Value to add to the queue pointer */ static inline void -nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val) +nfp_qcp_ptr_add(uint8_t *queue, + enum nfp_qcp_ptr ptr, + uint32_t val) { uint32_t off; @@ -324,16 +323,20 @@ nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val) else off = NFP_QCP_QUEUE_ADD_WPTR; - nn_writel(rte_cpu_to_le_32(val), q + off); + nn_writel(rte_cpu_to_le_32(val), queue + off); } -/* - * nfp_qcp_read - Read the current Read/Write pointer value for a queue - * @q: Base address for queue structure - * @ptr: Read or Write pointer +/** + * Read the current read/write pointer value for a queue. 
+ * + * @param queue + * Base address for queue structure + * @param ptr + * Read or Write pointer */ static inline uint32_t -nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr) +nfp_qcp_read(uint8_t *queue, + enum nfp_qcp_ptr ptr) { uint32_t off; uint32_t val; @@ -343,37 +346,26 @@ nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr) else off = NFP_QCP_QUEUE_STS_HI; - val = rte_cpu_to_le_32(nn_readl(q + off)); + val = rte_cpu_to_le_32(nn_readl(queue + off)); if (ptr == NFP_QCP_READ_PTR) - return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask; + return val & NFP_QCP_QUEUE_STS_LO_READPTR_MASK; else - return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask; + return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_MASK; } static inline uint32_t -nfp_pci_queue(struct rte_pci_device *pdev, uint16_t queue) +nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, + uint16_t queue) { - switch (pdev->id.device_id) { - case PCI_DEVICE_ID_NFP4000_PF_NIC: - case PCI_DEVICE_ID_NFP6000_PF_NIC: - return NFP_PCIE_QUEUE(NFP_PCIE_QCP_PF_OFFSET, queue, - NFP_PCIE_QUEUE_NFP6000_MASK); - case PCI_DEVICE_ID_NFP3800_VF_NIC: - return NFP_PCIE_QUEUE(NFP_PCIE_QCP_VF_OFFSET, queue, - NFP_PCIE_QUEUE_NFP3800_MASK); - case PCI_DEVICE_ID_NFP6000_VF_NIC: - return NFP_PCIE_QUEUE(NFP_PCIE_QCP_VF_OFFSET, queue, - NFP_PCIE_QUEUE_NFP6000_MASK); - default: - return NFP_PCIE_QUEUE(NFP_PCIE_QCP_PF_OFFSET, queue, - NFP_PCIE_QUEUE_NFP3800_MASK); - } + return dev_info->qc_addr_offset + NFP_QCP_QUEUE_ADDR_SZ * + (queue & dev_info->qc_idx_mask); } /* Prototypes for common NFP functions */ int nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update); int nfp_net_ext_reconfig(struct nfp_net_hw *hw, uint32_t ctrl_ext, uint32_t update); +int nfp_net_mbox_reconfig(struct nfp_net_hw *hw, uint32_t mbox_cmd); int nfp_net_configure(struct rte_eth_dev *dev); int nfp_net_common_init(struct rte_pci_device *pci_dev, struct nfp_net_hw *hw); void nfp_net_log_device_information(const struct nfp_net_hw *hw); @@ -383,12 +375,12 @@ void nfp_net_params_setup(struct nfp_net_hw *hw); void nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac); int nfp_net_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr); int nfp_configure_rx_interrupt(struct rte_eth_dev *dev, - struct rte_intr_handle *intr_handle); + struct rte_intr_handle *intr_handle); uint32_t nfp_check_offloads(struct rte_eth_dev *dev); int nfp_net_promisc_enable(struct rte_eth_dev *dev); int nfp_net_promisc_disable(struct rte_eth_dev *dev); int nfp_net_link_update(struct rte_eth_dev *dev, - __rte_unused int wait_to_complete); + __rte_unused int wait_to_complete); int nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); int nfp_net_stats_reset(struct rte_eth_dev *dev); uint32_t nfp_net_xstats_size(const struct rte_eth_dev *dev); @@ -408,7 +400,7 @@ int nfp_net_xstats_get_by_id(struct rte_eth_dev *dev, unsigned int n); int nfp_net_xstats_reset(struct rte_eth_dev *dev); int nfp_net_infos_get(struct rte_eth_dev *dev, - struct rte_eth_dev_info *dev_info); + struct rte_eth_dev_info *dev_info); const uint32_t *nfp_net_supported_ptypes_get(struct rte_eth_dev *dev); int nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); int nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); @@ -419,25 +411,25 @@ void nfp_net_dev_interrupt_delayed_handler(void *param); int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); int nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask); int nfp_net_reta_update(struct rte_eth_dev *dev, - struct 
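nfp_qcp_queue_offset() above replaces the old per-device-ID switch with one formula driven by the ASIC description: base offset plus the (masked) queue index times the per-queue window size. A self-contained sketch of that computation; the parameters mirror the dev_info fields used by the patch:

#include <stdint.h>

#define QCP_QUEUE_ADDR_SZ 0x800u   /* Per-queue register window size */

/* Queue-controller offset: base + (queue & idx_mask) * window size.
 * E.g. base 0x0, idx_mask 0xff, queue 3 gives 3 * 0x800 = 0x1800. */
static uint32_t
qcp_queue_offset(uint32_t qc_addr_offset, uint32_t qc_idx_mask, uint16_t queue)
{
	return qc_addr_offset + QCP_QUEUE_ADDR_SZ * (queue & qc_idx_mask);
}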
rte_eth_rss_reta_entry64 *reta_conf, - uint16_t reta_size); + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); int nfp_net_reta_query(struct rte_eth_dev *dev, - struct rte_eth_rss_reta_entry64 *reta_conf, - uint16_t reta_size); + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); int nfp_net_rss_hash_update(struct rte_eth_dev *dev, - struct rte_eth_rss_conf *rss_conf); + struct rte_eth_rss_conf *rss_conf); int nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, - struct rte_eth_rss_conf *rss_conf); + struct rte_eth_rss_conf *rss_conf); int nfp_net_rss_config_default(struct rte_eth_dev *dev); void nfp_net_stop_rx_queue(struct rte_eth_dev *dev); void nfp_net_close_rx_queue(struct rte_eth_dev *dev); void nfp_net_stop_tx_queue(struct rte_eth_dev *dev); void nfp_net_close_tx_queue(struct rte_eth_dev *dev); int nfp_net_set_vxlan_port(struct nfp_net_hw *hw, size_t idx, uint16_t port); -int nfp_net_rx_desc_limits(struct nfp_net_hw *hw, +void nfp_net_rx_desc_limits(struct nfp_net_hw *hw, uint16_t *min_rx_desc, uint16_t *max_rx_desc); -int nfp_net_tx_desc_limits(struct nfp_net_hw *hw, +void nfp_net_tx_desc_limits(struct nfp_net_hw *hw, uint16_t *min_tx_desc, uint16_t *max_tx_desc); int nfp_net_check_dma_mask(struct nfp_net_hw *hw, char *name); @@ -459,4 +451,4 @@ bool nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version); #define NFP_PRIV_TO_APP_FW_FLOWER(app_fw_priv)\ ((struct nfp_app_fw_flower *)app_fw_priv) -#endif /* _NFP_COMMON_H_ */ +#endif /* __NFP_COMMON_H__ */ diff --git a/drivers/net/nfp/nfp_cpp_bridge.c b/drivers/net/nfp/nfp_cpp_bridge.c index 88cd1aa5720..36dcdca9dee 100644 --- a/drivers/net/nfp/nfp_cpp_bridge.c +++ b/drivers/net/nfp/nfp_cpp_bridge.c @@ -1,26 +1,27 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 2014-2021 Netronome Systems, Inc. * All rights reserved. - * - * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. */ +#include "nfp_cpp_bridge.h" + #include #include #include #include "nfpcore/nfp_cpp.h" -#include "nfpcore/nfp_mip.h" -#include "nfpcore/nfp_nsp.h" - #include "nfp_logs.h" -#include "nfp_cpp_bridge.h" + +#define NFP_CPP_MEMIO_BOUNDARY (1 << 20) +#define NFP_BRIDGE_OP_READ 20 +#define NFP_BRIDGE_OP_WRITE 30 +#define NFP_BRIDGE_OP_IOCTL 40 + +#define NFP_IOCTL 'n' +#define NFP_IOCTL_CPP_IDENTIFICATION _IOW(NFP_IOCTL, 0x8f, uint32_t) /* Prototypes */ -static int nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp); -static int nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp); -static int nfp_cpp_bridge_serve_ioctl(int sockfd, struct nfp_cpp *cpp); static int nfp_cpp_bridge_service_func(void *args); int @@ -42,7 +43,7 @@ nfp_map_service(uint32_t service_id) /* * Find a service core with the least number of services already - * registered to it + * registered to it. 
*/ while (slcore_count--) { service_count = rte_service_lcore_count_services(slcore_array[slcore_count]); @@ -64,7 +65,7 @@ nfp_map_service(uint32_t service_id) rte_service_runstate_set(service_id, 1); rte_service_component_runstate_set(service_id, 1); rte_service_lcore_start(slcore); - if (rte_service_may_be_active(slcore)) + if (rte_service_may_be_active(slcore) != 0) PMD_INIT_LOG(INFO, "The service %s is running", service_name); else PMD_INIT_LOG(ERR, "The service %s is not running", service_name); @@ -94,7 +95,7 @@ nfp_enable_cpp_service(struct nfp_pf_dev *pf_dev) pf_dev->cpp_bridge_id = service_id; PMD_INIT_LOG(INFO, "NFP cpp service registered"); - /* Map it to available service core*/ + /* Map it to available service core */ ret = nfp_map_service(service_id); if (ret != 0) { PMD_INIT_LOG(DEBUG, "Could not map nfp cpp service"); @@ -110,17 +111,22 @@ nfp_enable_cpp_service(struct nfp_pf_dev *pf_dev) * of CPP interface handler configured by the PMD setup. */ static int -nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp) +nfp_cpp_bridge_serve_write(int sockfd, + struct nfp_cpp *cpp) { - struct nfp_cpp_area *area; - off_t offset, nfp_offset; - uint32_t cpp_id, pos, len; + int err; + off_t offset; + uint32_t pos; + uint32_t len; + size_t count; + size_t curlen; + uint32_t cpp_id; + off_t nfp_offset; uint32_t tmpbuf[16]; - size_t count, curlen; - int err = 0; + struct nfp_cpp_area *area; - PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__, - sizeof(off_t), sizeof(size_t)); + PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu", __func__, + sizeof(off_t), sizeof(size_t)); /* Reading the count param */ err = recv(sockfd, &count, sizeof(off_t), 0); @@ -138,28 +144,28 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp) cpp_id = (offset >> 40) << 8; nfp_offset = offset & ((1ull << 40) - 1); - PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count, - offset); - PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__, - cpp_id, nfp_offset); + PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd", __func__, count, + offset); + PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd", __func__, + cpp_id, nfp_offset); /* Adjust length if not aligned */ if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) != - (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) { + (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) { curlen = NFP_CPP_MEMIO_BOUNDARY - - (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1)); + (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1)); } while (count > 0) { - /* configure a CPP PCIe2CPP BAR for mapping the CPP target */ + /* Configure a CPP PCIe2CPP BAR for mapping the CPP target */ area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev", - nfp_offset, curlen); + nfp_offset, curlen); if (area == NULL) { PMD_CPP_LOG(ERR, "area alloc fail"); return -EIO; } - /* mapping the target */ + /* Mapping the target */ err = nfp_cpp_area_acquire(area); if (err < 0) { PMD_CPP_LOG(ERR, "area acquire failed"); @@ -172,17 +178,17 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp) if (len > sizeof(tmpbuf)) len = sizeof(tmpbuf); - PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu\n", __func__, - len, count); + PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu", __func__, + len, count); err = recv(sockfd, tmpbuf, len, MSG_WAITALL); if (err != (int)len) { - PMD_CPP_LOG(ERR, - "error when receiving, %d of %zu", - err, count); + PMD_CPP_LOG(ERR, "error when receiving, %d of %zu", + err, count); nfp_cpp_area_release(area); 
nfp_cpp_area_free(area); return -EIO; } + err = nfp_cpp_area_write(area, pos, tmpbuf, len); if (err < 0) { PMD_CPP_LOG(ERR, "nfp_cpp_area_write error"); @@ -198,7 +204,7 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp) count -= pos; curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ? - NFP_CPP_MEMIO_BOUNDARY : count; + NFP_CPP_MEMIO_BOUNDARY : count; } return 0; @@ -211,17 +217,22 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp) * data is sent to the requester using the same socket. */ static int -nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp) +nfp_cpp_bridge_serve_read(int sockfd, + struct nfp_cpp *cpp) { - struct nfp_cpp_area *area; - off_t offset, nfp_offset; - uint32_t cpp_id, pos, len; + int err; + off_t offset; + uint32_t pos; + uint32_t len; + size_t count; + size_t curlen; + uint32_t cpp_id; + off_t nfp_offset; uint32_t tmpbuf[16]; - size_t count, curlen; - int err = 0; + struct nfp_cpp_area *area; - PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__, - sizeof(off_t), sizeof(size_t)); + PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu", __func__, + sizeof(off_t), sizeof(size_t)); /* Reading the count param */ err = recv(sockfd, &count, sizeof(off_t), 0); @@ -239,21 +250,21 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp) cpp_id = (offset >> 40) << 8; nfp_offset = offset & ((1ull << 40) - 1); - PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count, - offset); - PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__, - cpp_id, nfp_offset); + PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd", __func__, count, + offset); + PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd", __func__, + cpp_id, nfp_offset); /* Adjust length if not aligned */ if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) != - (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) { + (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) { curlen = NFP_CPP_MEMIO_BOUNDARY - - (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1)); + (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1)); } while (count > 0) { area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev", - nfp_offset, curlen); + nfp_offset, curlen); if (area == NULL) { PMD_CPP_LOG(ERR, "area alloc failed"); return -EIO; @@ -278,14 +289,13 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp) nfp_cpp_area_free(area); return -EIO; } - PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu\n", __func__, - len, count); + PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu", __func__, + len, count); err = send(sockfd, tmpbuf, len, 0); if (err != (int)len) { - PMD_CPP_LOG(ERR, - "error when sending: %d of %zu", - err, count); + PMD_CPP_LOG(ERR, "error when sending: %d of %zu", + err, count); nfp_cpp_area_release(area); nfp_cpp_area_free(area); return -EIO; @@ -298,8 +308,9 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp) count -= pos; curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ? - NFP_CPP_MEMIO_BOUNDARY : count; + NFP_CPP_MEMIO_BOUNDARY : count; } + return 0; } @@ -310,10 +321,13 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp) * does not require any CPP access at all. 
*/ static int -nfp_cpp_bridge_serve_ioctl(int sockfd, struct nfp_cpp *cpp) +nfp_cpp_bridge_serve_ioctl(int sockfd, + struct nfp_cpp *cpp) { - uint32_t cmd, ident_size, tmp; int err; + uint32_t cmd; + uint32_t tmp; + uint32_t ident_size; /* Reading now the IOCTL command */ err = recv(sockfd, &cmd, 4, 0); @@ -336,7 +350,7 @@ nfp_cpp_bridge_serve_ioctl(int sockfd, struct nfp_cpp *cpp) tmp = nfp_cpp_model(cpp); - PMD_CPP_LOG(DEBUG, "%s: sending NFP model %08x\n", __func__, tmp); + PMD_CPP_LOG(DEBUG, "%s: sending NFP model %08x", __func__, tmp); err = send(sockfd, &tmp, 4, 0); if (err != 4) { @@ -344,9 +358,9 @@ nfp_cpp_bridge_serve_ioctl(int sockfd, struct nfp_cpp *cpp) return -EIO; } - tmp = cpp->interface; + tmp = nfp_cpp_interface(cpp); - PMD_CPP_LOG(DEBUG, "%s: sending NFP interface %08x\n", __func__, tmp); + PMD_CPP_LOG(DEBUG, "%s: sending NFP interface %08x", __func__, tmp); err = send(sockfd, &tmp, 4, 0); if (err != 4) { @@ -368,13 +382,17 @@ nfp_cpp_bridge_serve_ioctl(int sockfd, struct nfp_cpp *cpp) static int nfp_cpp_bridge_service_func(void *args) { - struct sockaddr address; + int op; + int ret; + int sockfd; + int datafd; struct nfp_cpp *cpp; + struct sockaddr address; struct nfp_pf_dev *pf_dev; - int sockfd, datafd, op, ret; struct timeval timeout = {1, 0}; unlink("/tmp/nfp_cpp"); + sockfd = socket(AF_UNIX, SOCK_STREAM, 0); if (sockfd < 0) { PMD_CPP_LOG(ERR, "socket creation error. Service failed"); @@ -389,7 +407,7 @@ nfp_cpp_bridge_service_func(void *args) strcpy(address.sa_data, "/tmp/nfp_cpp"); ret = bind(sockfd, (const struct sockaddr *)&address, - sizeof(struct sockaddr)); + sizeof(struct sockaddr)); if (ret < 0) { PMD_CPP_LOG(ERR, "bind error (%d). Service failed", errno); close(sockfd); @@ -417,15 +435,14 @@ nfp_cpp_bridge_service_func(void *args) return -EIO; } - while (1) { + for (;;) { ret = recv(datafd, &op, 4, 0); if (ret <= 0) { - PMD_CPP_LOG(DEBUG, "%s: socket close\n", - __func__); + PMD_CPP_LOG(DEBUG, "%s: socket close", __func__); break; } - PMD_CPP_LOG(DEBUG, "%s: getting op %u\n", __func__, op); + PMD_CPP_LOG(DEBUG, "%s: getting op %u", __func__, op); if (op == NFP_BRIDGE_OP_READ) nfp_cpp_bridge_serve_read(datafd, cpp); @@ -439,8 +456,10 @@ nfp_cpp_bridge_service_func(void *args) if (op == 0) break; } + close(datafd); } + close(sockfd); return 0; diff --git a/drivers/net/nfp/nfp_cpp_bridge.h b/drivers/net/nfp/nfp_cpp_bridge.h index 85289e158b1..a1103e85e4d 100644 --- a/drivers/net/nfp/nfp_cpp_bridge.h +++ b/drivers/net/nfp/nfp_cpp_bridge.h @@ -1,24 +1,14 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 2014-2021 Netronome Systems, Inc. * All rights reserved. - * - * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. */ -#ifndef _NFP_CPP_BRIDGE_H_ -#define _NFP_CPP_BRIDGE_H_ +#ifndef __NFP_CPP_BRIDGE_H__ +#define __NFP_CPP_BRIDGE_H__ #include "nfp_common.h" -#define NFP_CPP_MEMIO_BOUNDARY (1 << 20) -#define NFP_BRIDGE_OP_READ 20 -#define NFP_BRIDGE_OP_WRITE 30 -#define NFP_BRIDGE_OP_IOCTL 40 - -#define NFP_IOCTL 'n' -#define NFP_IOCTL_CPP_IDENTIFICATION _IOW(NFP_IOCTL, 0x8f, uint32_t) - int nfp_enable_cpp_service(struct nfp_pf_dev *pf_dev); int nfp_map_service(uint32_t service_id); -#endif /* _NFP_CPP_BRIDGE_H_ */ +#endif /* __NFP_CPP_BRIDGE_H__ */ diff --git a/drivers/net/nfp/nfp_ctrl.c b/drivers/net/nfp/nfp_ctrl.c new file mode 100644 index 00000000000..6fc8cffd2e9 --- /dev/null +++ b/drivers/net/nfp/nfp_ctrl.c @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2023 Corigine Systems, Inc. 
+ * All rights reserved. + */ + +#include "nfp_ctrl.h" + +#include + +#include "nfpcore/nfp_platform.h" + +#include "nfp_common.h" +#include "nfp_logs.h" + +static void +nfp_net_tlv_caps_reset(struct nfp_net_tlv_caps *caps) +{ + memset(caps, 0, sizeof(*caps)); + caps->mbox_off = NFP_NET_CFG_MBOX_BASE; + caps->mbox_len = NFP_NET_CFG_MBOX_VAL_MAX_SZ; +} + +int +nfp_net_tlv_caps_parse(struct rte_eth_dev *dev) +{ + uint32_t hdr; + uint8_t *end; + uint8_t *data; + uint32_t length; + uint32_t offset; + uint32_t tlv_type; + struct nfp_net_hw *hw; + struct nfp_net_tlv_caps *caps; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + caps = &hw->tlv_caps; + nfp_net_tlv_caps_reset(caps); + + data = hw->ctrl_bar + NFP_NET_CFG_TLV_BASE; + end = hw->ctrl_bar + NFP_NET_CFG_BAR_SZ; + + hdr = rte_read32(data); + if (hdr == 0) { + PMD_DRV_LOG(INFO, "TLV is empty!"); + return 0; + } + + for (; ; data += length) { + offset = data - hw->ctrl_bar; + + if (data + NFP_NET_CFG_TLV_VALUE > end) { + PMD_DRV_LOG(ERR, "Reached end of BAR without END TLV"); + return -EINVAL; + } + + hdr = rte_read32(data); + + length = FIELD_GET(NFP_NET_CFG_TLV_HEADER_LENGTH, hdr); + if ((length & (NFP_NET_CFG_TLV_LENGTH_INC - 1)) != 0) { + PMD_DRV_LOG(ERR, "TLV size not multiple of 4B len: %u", length); + return -EINVAL; + } + + /* Advance past the header */ + data += NFP_NET_CFG_TLV_VALUE; + if (data + length > end) { + PMD_DRV_LOG(ERR, "Oversized TLV offset: %u len: %u", + offset, length); + return -EINVAL; + } + + tlv_type = FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr); + + switch (tlv_type) { + case NFP_NET_CFG_TLV_TYPE_UNKNOWN: + PMD_DRV_LOG(ERR, "Unknown TLV at offset: %u", offset); + return -EINVAL; + case NFP_NET_CFG_TLV_TYPE_RESERVED: + break; + case NFP_NET_CFG_TLV_TYPE_END: + if (length == 0) + return 0; + + PMD_DRV_LOG(ERR, "END TLV should be empty, has len: %u", length); + return -EINVAL; + case NFP_NET_CFG_TLV_TYPE_MBOX: + caps->mbox_len = length; + + if (length != 0) + caps->mbox_off = data - hw->ctrl_bar; + else + caps->mbox_off = 0; + break; + case NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES: + if (length != 0) + caps->mbox_cmsg_types = rte_read32(data); + break; + default: + if (FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr) == 0) + break; + + PMD_DRV_LOG(ERR, "Unknown TLV type: %u offset: %u len: %u", + tlv_type, offset, length); + return -EINVAL; + } + } + + PMD_DRV_LOG(ERR, "Reached end of BAR without END TLV"); + return -EINVAL; +} diff --git a/drivers/net/nfp/nfp_ctrl.h b/drivers/net/nfp/nfp_ctrl.h index 70075484471..9ec51e0a25c 100644 --- a/drivers/net/nfp/nfp_ctrl.h +++ b/drivers/net/nfp/nfp_ctrl.h @@ -3,8 +3,12 @@ * All rights reserved. */ -#ifndef _NFP_CTRL_H_ -#define _NFP_CTRL_H_ +#ifndef __NFP_CTRL_H__ +#define __NFP_CTRL_H__ + +#include + +#include /* * Configuration BAR size. 
@@ -16,7 +20,7 @@ /* Offset in Freelist buffer where packet starts on RX */ #define NFP_NET_RX_OFFSET 32 -/* working with metadata api (NFD version > 3.0) */ +/* Working with metadata api (NFD version > 3.0) */ #define NFP_NET_META_FIELD_SIZE 4 #define NFP_NET_META_FIELD_MASK ((1 << NFP_NET_META_FIELD_SIZE) - 1) #define NFP_NET_META_HEADER_SIZE 4 @@ -32,13 +36,14 @@ NFP_NET_META_VLAN_TPID_MASK) /* Prepend field types */ -#define NFP_NET_META_HASH 1 /* next field carries hash type */ +#define NFP_NET_META_HASH 1 /* Next field carries hash type */ #define NFP_NET_META_VLAN 4 #define NFP_NET_META_PORTID 5 +#define NFP_NET_META_IPSEC 9 #define NFP_META_PORT_ID_CTRL ~0U -/* Hash type pre-pended when a RSS hash was computed */ +/* Hash type prepended when a RSS hash was computed */ #define NFP_NET_RSS_NONE 0 #define NFP_NET_RSS_IPV4 1 #define NFP_NET_RSS_IPV6 2 @@ -97,7 +102,7 @@ #define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */ #define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */ #define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */ -#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/ +#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring */ #define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */ #define NFP_NET_CFG_CTRL_TXVLAN_V2 (0x1 << 23) /* Enable VLAN insert with metadata */ #define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* Enable VXLAN */ @@ -106,7 +111,7 @@ #define NFP_NET_CFG_CTRL_LSO2 (0x1 << 28) /* LSO/TSO (version 2) */ #define NFP_NET_CFG_CTRL_RSS2 (0x1 << 29) /* RSS (version 2) */ #define NFP_NET_CFG_CTRL_CSUM_COMPLETE (0x1 << 30) /* Checksum complete */ -#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1U << 31)/* live MAC addr change */ +#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1U << 31) /* Live MAC addr change */ #define NFP_NET_CFG_UPDATE 0x0004 #define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */ #define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */ @@ -119,6 +124,7 @@ #define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */ #define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */ #define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */ +#define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */ #define NFP_NET_CFG_UPDATE_ERR (0x1U << 31) /* A error occurred */ #define NFP_NET_CFG_TXRS_ENABLE 0x0008 #define NFP_NET_CFG_RXRS_ENABLE 0x0010 @@ -199,11 +205,14 @@ struct nfp_net_fw_ver { * @NFP_NET_CFG_SPARE_ADDR: DMA address for ME code to use (e.g. 
YDS-155 fix) */ #define NFP_NET_CFG_SPARE_ADDR 0x0050 -/** +/* * NFP6000/NFP4000 - Prepend configuration */ -#define NFP_NET_CFG_RX_OFFSET 0x0050 -#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */ +#define NFP_NET_CFG_RX_OFFSET 0x0050 +#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */ + +/* Start anchor of the TLV area */ +#define NFP_NET_CFG_TLV_BASE 0x0058 /** * Reuse spare address to contain the offset from the start of @@ -230,6 +239,9 @@ struct nfp_net_fw_ver { */ #define NFP_NET_CFG_CTRL_WORD1 0x0098 #define NFP_NET_CFG_CTRL_PKT_TYPE (0x1 << 0) +#define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /**< IPsec offload */ +#define NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP (0x1 << 3) /**< SA short match lookup */ +#define NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP (0x1 << 4) /**< SA long match lookup */ #define NFP_NET_CFG_CAP_WORD1 0x00a4 @@ -268,7 +280,7 @@ struct nfp_net_fw_ver { * @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration * @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries) * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries) - * @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries) + * @NFP_NET_CFG_TXR_SZ: Per TX ring size (1B entries) * @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries) * @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries) * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation (4B entries) @@ -287,7 +299,7 @@ struct nfp_net_fw_ver { * RX ring configuration (0x0800 - 0x0c00) * @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration * @NFP_NET_CFG_RXR_ADDR: Per TX ring DMA address (8B entries) - * @NFP_NET_CFG_RXR_SZ: Per TX ring ring size (1B entries) + * @NFP_NET_CFG_RXR_SZ: Per TX ring size (1B entries) * @NFP_NET_CFG_RXR_VEC: Per TX ring MSI-X table entry (1B entries) * @NFP_NET_CFG_RXR_PRIO: Per TX ring priority (1B entries) * @NFP_NET_CFG_RXR_IRQ_MOD: Per TX ring interrupt moderation (4B entries) @@ -318,7 +330,7 @@ struct nfp_net_fw_ver { /* * General device stats (0x0d00 - 0x0d90) - * all counters are 64bit. + * All counters are 64bit. */ #define NFP_NET_CFG_STATS_BASE 0x0d00 #define NFP_NET_CFG_STATS_RX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x00) @@ -352,7 +364,7 @@ struct nfp_net_fw_ver { /* * Per ring stats (0x1000 - 0x1800) - * options, 64bit per entry + * Options, 64bit per entry * @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count) * @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count) */ @@ -363,9 +375,9 @@ struct nfp_net_fw_ver { #define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \ ((_x) * 0x10)) -/** +/* * Mac stats (0x0000 - 0x0200) - * all counters are 64bit. + * All counters are 64bit. */ #define NFP_MAC_STATS_BASE 0x0000 #define NFP_MAC_STATS_SIZE 0x0200 @@ -430,11 +442,125 @@ struct nfp_net_fw_ver { #define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6 (NFP_MAC_STATS_BASE + 0x1f0) #define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7 (NFP_MAC_STATS_BASE + 0x1f8) -#define NFP_PF_CSR_SLICE_SIZE (32 * 1024) +/* + * General use mailbox area (0x1800 - 0x19ff) + * 4B used for update command and 4B return code followed by + * a max of 504B of variable length value. 
+ */ +#define NFP_NET_CFG_MBOX_BASE 0x1800 +#define NFP_NET_CFG_MBOX_VAL 0x1808 +#define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8 +#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0 +#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4 +#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8 + +#define NFP_NET_CFG_MBOX_CMD_IPSEC 3 + +/* + * TLV capabilities + * @NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV + * @NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV + * @NFP_NET_CFG_TLV_LENGTH: Offset of length within the TLV + * @NFP_NET_CFG_TLV_LENGTH_INC: TLV length increments + * @NFP_NET_CFG_TLV_VALUE: Offset of value with the TLV + * @NFP_NET_CFG_TLV_STATS_OFFSET: Length of TLV stats offset + * + * List of simple TLV structures, first one starts at @NFP_NET_CFG_TLV_BASE. + * Last structure must be of type @NFP_NET_CFG_TLV_TYPE_END. Presence of TLVs + * is indicated by @NFP_NET_CFG_TLV_BASE being non-zero. TLV structures may + * fill the entire remainder of the BAR or be shorter. FW must make sure TLVs + * don't conflict with other features which allocate space beyond + * @NFP_NET_CFG_TLV_BASE. @NFP_NET_CFG_TLV_TYPE_RESERVED should be used to wrap + * space used by such features. + * + * Note that the 4 byte TLV header is not counted in %NFP_NET_CFG_TLV_LENGTH. + */ +#define NFP_NET_CFG_TLV_TYPE 0x00 +#define NFP_NET_CFG_TLV_TYPE_REQUIRED 0x8000 +#define NFP_NET_CFG_TLV_LENGTH 0x02 +#define NFP_NET_CFG_TLV_LENGTH_INC 4 +#define NFP_NET_CFG_TLV_VALUE 0x04 +#define NFP_NET_CFG_TLV_STATS_OFFSET 0x08 + +#define NFP_NET_CFG_TLV_HEADER_REQUIRED 0x80000000 +#define NFP_NET_CFG_TLV_HEADER_TYPE 0x7fff0000 +#define NFP_NET_CFG_TLV_HEADER_LENGTH 0x0000ffff /* - * nfp_net_cfg_ctrl_rss() - Get RSS flag based on firmware's capability - * @hw_cap: The firmware's capabilities + * Capability TLV types + * + * @NFP_NET_CFG_TLV_TYPE_UNKNOWN: + * Special TLV type to catch bugs, should never be encountered. Drivers should + * treat encountering this type as error and refuse to probe. + * + * @NFP_NET_CFG_TLV_TYPE_RESERVED: + * Reserved space, may contain legacy fixed-offset fields, or be used for + * padding. The use of this type should be otherwise avoided. + * + * @NFP_NET_CFG_TLV_TYPE_END: + * Empty, end of TLV list. Must be the last TLV. Drivers will stop processing + * further TLVs when encountered. + * + * @NFP_NET_CFG_TLV_TYPE_ME_FREQ: + * Single word, ME frequency in MHz as used in calculation for + * @NFP_NET_CFG_RXR_IRQ_MOD and @NFP_NET_CFG_TXR_IRQ_MOD. + * + * @NFP_NET_CFG_TLV_TYPE_MBOX: + * Variable, mailbox area. Overwrites the default location which is + * @NFP_NET_CFG_MBOX_BASE and length @NFP_NET_CFG_MBOX_VAL_MAX_SZ. + * + * @NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0: + * @NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1: + * Variable, experimental IDs. IDs designated for internal development and + * experiments before a stable TLV ID has been allocated to a feature. Should + * never be present in production FW. + * + * @NFP_NET_CFG_TLV_TYPE_REPR_CAP: + * Single word, equivalent of %NFP_NET_CFG_CAP for representors, features which + * can be used on representors. + * + * @NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES: + * Variable, bitmap of control message types supported by the mailbox handler. + * Bit 0 corresponds to message type 0, bit 1 to 1, etc. Control messages are + * encapsulated into simple TLVs, with an end TLV and written to the Mailbox. + * + * @NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS: + * 8 words, bitmaps of supported and enabled crypto operations. 
+ * First 16B (4 words) contains a bitmap of supported crypto operations, + * and next 16B contain the enabled operations. + * This capability is obsoleted by ones with better sync methods. + * + * @NFP_NET_CFG_TLV_TYPE_VNIC_STATS: + * Variable, per-vNIC statistics, data should be 8B aligned (FW should insert + * zero-length RESERVED TLV to pad). + * TLV data has two sections. First is an array of statistics' IDs (2B each). + * Second 8B statistics themselves. Statistics are 8B aligned, meaning there + * may be a padding between sections. + * Number of statistics can be determined as floor(tlv.length / (2 + 8)). + * This TLV overwrites %NFP_NET_CFG_STATS_* values (statistics in this TLV + * duplicate the old ones, so driver should be careful not to unnecessarily + * render both). + * + * @NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN: + * Same as %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS, but crypto TLS does stream scan + * RX sync, rather than kernel-assisted sync. + * + * @NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_LENGTH: + * CRYPTO OPS TLV should be at least 32B. + */ +#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0 +#define NFP_NET_CFG_TLV_TYPE_RESERVED 1 +#define NFP_NET_CFG_TLV_TYPE_END 2 +#define NFP_NET_CFG_TLV_TYPE_MBOX 4 +#define NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES 10 + +int nfp_net_tlv_caps_parse(struct rte_eth_dev *dev); + +/** + * Get RSS flag based on firmware's capability + * + * @param hw_cap + * The firmware's capabilities */ static inline uint32_t nfp_net_cfg_ctrl_rss(uint32_t hw_cap) @@ -445,4 +571,4 @@ nfp_net_cfg_ctrl_rss(uint32_t hw_cap) return NFP_NET_CFG_CTRL_RSS; } -#endif /* _NFP_CTRL_H_ */ +#endif /* __NFP_CTRL_H__ */ diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c index e3ff3d80873..20940072fe6 100644 --- a/drivers/net/nfp/nfp_ethdev.c +++ b/drivers/net/nfp/nfp_ethdev.c @@ -5,40 +5,28 @@ * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. 
*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include -#include "eal_firmware.h" +#include "flower/nfp_flower.h" +#include "nfd3/nfp_nfd3.h" +#include "nfdk/nfp_nfdk.h" #include "nfpcore/nfp_cpp.h" -#include "nfpcore/nfp_nffw.h" #include "nfpcore/nfp_hwinfo.h" -#include "nfpcore/nfp_mip.h" #include "nfpcore/nfp_rtsym.h" #include "nfpcore/nfp_nsp.h" +#include "nfpcore/nfp6000_pcie.h" -#include "nfp_common.h" -#include "nfp_ctrl.h" -#include "nfp_rxtx.h" -#include "nfp_logs.h" #include "nfp_cpp_bridge.h" - -#include "nfd3/nfp_nfd3.h" -#include "nfdk/nfp_nfdk.h" -#include "flower/nfp_flower.h" +#include "nfp_ipsec.h" +#include "nfp_logs.h" static int -nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, int port) +nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, + uint16_t port) { + struct nfp_net_hw *hw; struct nfp_eth_table *nfp_eth_table; - struct nfp_net_hw *hw = NULL; /* Grab a pointer to the correct physical port */ hw = app_fw_nic->ports[port]; @@ -48,61 +36,63 @@ nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, int port) rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->mac_addr); free(nfp_eth_table); + return 0; } static int nfp_net_start(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = pci_dev->intr_handle; - uint32_t new_ctrl, update = 0; + int ret; + uint16_t i; + uint32_t new_ctrl; + uint32_t update = 0; uint32_t cap_extend; - uint32_t ctrl_extend = 0; + uint32_t intr_vector; struct nfp_net_hw *hw; + uint32_t ctrl_extend = 0; struct nfp_pf_dev *pf_dev; - struct nfp_app_fw_nic *app_fw_nic; struct rte_eth_conf *dev_conf; struct rte_eth_rxmode *rxmode; - uint32_t intr_vector; - int ret; + struct nfp_app_fw_nic *app_fw_nic; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private); app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); - PMD_INIT_LOG(DEBUG, "Start"); - /* Disabling queues just in case... */ nfp_net_disable_queues(dev); /* Enabling the required queues in the device */ nfp_net_enable_queues(dev); - /* check and configure queue intr-vector mapping */ + /* Check and configure queue intr-vector mapping */ if (dev->data->dev_conf.intr_conf.rxq != 0) { if (app_fw_nic->multiport) { PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported " - "with NFP multiport PF"); + "with NFP multiport PF"); return -EINVAL; } - if (rte_intr_type_get(intr_handle) == - RTE_INTR_HANDLE_UIO) { + + if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) { /* * Better not to share LSC with RX interrupts. - * Unregistering LSC interrupt handler + * Unregistering LSC interrupt handler. 
*/ - rte_intr_callback_unregister(pci_dev->intr_handle, - nfp_net_dev_interrupt_handler, (void *)dev); + rte_intr_callback_unregister(intr_handle, + nfp_net_dev_interrupt_handler, (void *)dev); if (dev->data->nb_rx_queues > 1) { PMD_INIT_LOG(ERR, "PMD rx interrupt only " - "supports 1 queue with UIO"); + "supports 1 queue with UIO"); return -EIO; } } + intr_vector = dev->data->nb_rx_queues; - if (rte_intr_efd_enable(intr_handle, intr_vector)) + if (rte_intr_efd_enable(intr_handle, intr_vector) != 0) return -1; nfp_configure_rx_interrupt(dev, intr_handle); @@ -126,7 +116,7 @@ nfp_net_start(struct rte_eth_dev *dev) dev_conf = &dev->data->dev_conf; rxmode = &dev_conf->rxmode; - if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) { + if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) { nfp_net_rss_config_default(dev); update |= NFP_NET_CFG_UPDATE_RSS; new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap); @@ -138,31 +128,35 @@ nfp_net_start(struct rte_eth_dev *dev) update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; /* Enable vxlan */ - if (hw->cap & NFP_NET_CFG_CTRL_VXLAN) { + if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) { new_ctrl |= NFP_NET_CFG_CTRL_VXLAN; update |= NFP_NET_CFG_UPDATE_VXLAN; } - if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG) + if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0) new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; - if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + if (nfp_net_reconfig(hw, new_ctrl, update) != 0) return -EIO; /* Enable packet type offload by extend ctrl word1. */ - cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1); + cap_extend = hw->cap_ext; if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0) ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE; + if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) + ctrl_extend |= NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP + | NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP; + update = NFP_NET_CFG_UPDATE_GEN; - if (nfp_net_ext_reconfig(hw, ctrl_extend, update) < 0) + if (nfp_net_ext_reconfig(hw, ctrl_extend, update) != 0) return -EIO; /* * Allocating rte mbufs for configured rx queues. - * This requires queues being enabled before + * This requires queues being enabled before. 
*/ - if (nfp_net_rx_freelist_setup(dev) < 0) { + if (nfp_net_rx_freelist_setup(dev) != 0) { ret = -ENOMEM; goto error; } @@ -171,11 +165,15 @@ nfp_net_start(struct rte_eth_dev *dev) /* Configure the physical port up */ nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1); else - nfp_eth_set_configured(dev->process_private, - hw->nfp_idx, 1); + nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 1); hw->ctrl = new_ctrl; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; error: @@ -203,23 +201,19 @@ nfp_net_stop(struct rte_eth_dev *dev) { struct nfp_net_hw *hw; - PMD_INIT_LOG(DEBUG, "Stop"); - hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); nfp_net_disable_queues(dev); /* Clear queues */ nfp_net_stop_tx_queue(dev); - nfp_net_stop_rx_queue(dev); if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* Configure the physical port down */ nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); else - nfp_eth_set_configured(dev->process_private, - hw->nfp_idx, 0); + nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0); return 0; } @@ -230,16 +224,13 @@ nfp_net_set_link_up(struct rte_eth_dev *dev) { struct nfp_net_hw *hw; - PMD_DRV_LOG(DEBUG, "Set link up"); - hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* Configure the physical port down */ return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1); else - return nfp_eth_set_configured(dev->process_private, - hw->nfp_idx, 1); + return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 1); } /* Set the link down. */ @@ -248,33 +239,28 @@ nfp_net_set_link_down(struct rte_eth_dev *dev) { struct nfp_net_hw *hw; - PMD_DRV_LOG(DEBUG, "Set link down"); - hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* Configure the physical port down */ return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); else - return nfp_eth_set_configured(dev->process_private, - hw->nfp_idx, 0); + return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0); } /* Reset and stop device. The device can not be restarted. */ static int nfp_net_close(struct rte_eth_dev *dev) { + uint8_t i; struct nfp_net_hw *hw; - struct rte_pci_device *pci_dev; struct nfp_pf_dev *pf_dev; + struct rte_pci_device *pci_dev; struct nfp_app_fw_nic *app_fw_nic; - int i; if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - PMD_INIT_LOG(DEBUG, "Close"); - pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private); hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); pci_dev = RTE_ETH_DEV_TO_PCI(dev); @@ -284,34 +270,34 @@ nfp_net_close(struct rte_eth_dev *dev) * We assume that the DPDK application is stopping all the * threads/queues before calling the device close function. 
*/ - nfp_net_disable_queues(dev); /* Clear queues */ nfp_net_close_tx_queue(dev); - nfp_net_close_rx_queue(dev); - /* Cancel possible impending LSC work here before releasing the port*/ - rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, - (void *)dev); + /* Clear ipsec */ + nfp_ipsec_uninit(dev); + + /* Cancel possible impending LSC work here before releasing the port */ + rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); /* Only free PF resources after all physical ports have been closed */ - /* Mark this port as unused and free device priv resources*/ + /* Mark this port as unused and free device priv resources */ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); app_fw_nic->ports[hw->idx] = NULL; rte_eth_dev_release_port(dev); for (i = 0; i < app_fw_nic->total_phyports; i++) { /* Check to see if ports are still in use */ - if (app_fw_nic->ports[i]) + if (app_fw_nic->ports[i] != NULL) return 0; } /* Now it is safe to free all PF resources */ PMD_INIT_LOG(INFO, "Freeing PF resources"); nfp_cpp_area_free(pf_dev->ctrl_area); - nfp_cpp_area_free(pf_dev->hwqueues_area); + nfp_cpp_area_free(pf_dev->qc_area); free(pf_dev->hwinfo); free(pf_dev->sym_tbl); nfp_cpp_free(pf_dev->cpp); @@ -320,15 +306,10 @@ nfp_net_close(struct rte_eth_dev *dev) rte_intr_disable(pci_dev->intr_handle); - /* unregister callback func from eal lib */ + /* Unregister callback func from eal lib */ rte_intr_callback_unregister(pci_dev->intr_handle, nfp_net_dev_interrupt_handler, (void *)dev); - /* - * The ixgbe PMD disables the pcie master on the - * device. The i40e does not... - */ - return 0; } @@ -438,35 +419,35 @@ nfp_udp_tunnel_port_del(struct rte_eth_dev *dev, /* Initialise and register driver with DPDK Application */ static const struct eth_dev_ops nfp_net_eth_dev_ops = { - .dev_configure = nfp_net_configure, - .dev_start = nfp_net_start, - .dev_stop = nfp_net_stop, - .dev_set_link_up = nfp_net_set_link_up, - .dev_set_link_down = nfp_net_set_link_down, - .dev_close = nfp_net_close, - .promiscuous_enable = nfp_net_promisc_enable, - .promiscuous_disable = nfp_net_promisc_disable, - .link_update = nfp_net_link_update, - .stats_get = nfp_net_stats_get, - .stats_reset = nfp_net_stats_reset, + .dev_configure = nfp_net_configure, + .dev_start = nfp_net_start, + .dev_stop = nfp_net_stop, + .dev_set_link_up = nfp_net_set_link_up, + .dev_set_link_down = nfp_net_set_link_down, + .dev_close = nfp_net_close, + .promiscuous_enable = nfp_net_promisc_enable, + .promiscuous_disable = nfp_net_promisc_disable, + .link_update = nfp_net_link_update, + .stats_get = nfp_net_stats_get, + .stats_reset = nfp_net_stats_reset, .xstats_get = nfp_net_xstats_get, .xstats_reset = nfp_net_xstats_reset, .xstats_get_names = nfp_net_xstats_get_names, .xstats_get_by_id = nfp_net_xstats_get_by_id, .xstats_get_names_by_id = nfp_net_xstats_get_names_by_id, - .dev_infos_get = nfp_net_infos_get, + .dev_infos_get = nfp_net_infos_get, .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, - .mtu_set = nfp_net_dev_mtu_set, - .mac_addr_set = nfp_net_set_mac_addr, - .vlan_offload_set = nfp_net_vlan_offload_set, - .reta_update = nfp_net_reta_update, - .reta_query = nfp_net_reta_query, - .rss_hash_update = nfp_net_rss_hash_update, - .rss_hash_conf_get = nfp_net_rss_hash_conf_get, - .rx_queue_setup = nfp_net_rx_queue_setup, - .rx_queue_release = nfp_net_rx_queue_release, - .tx_queue_setup = nfp_net_tx_queue_setup, - .tx_queue_release = nfp_net_tx_queue_release, + .mtu_set = nfp_net_dev_mtu_set, + .mac_addr_set = nfp_net_set_mac_addr, + 
.vlan_offload_set = nfp_net_vlan_offload_set, + .reta_update = nfp_net_reta_update, + .reta_query = nfp_net_reta_query, + .rss_hash_update = nfp_net_rss_hash_update, + .rss_hash_conf_get = nfp_net_rss_hash_conf_get, + .rx_queue_setup = nfp_net_rx_queue_setup, + .rx_queue_release = nfp_net_rx_queue_release, + .tx_queue_setup = nfp_net_tx_queue_setup, + .tx_queue_release = nfp_net_tx_queue_release, .rx_queue_intr_enable = nfp_rx_queue_intr_enable, .rx_queue_intr_disable = nfp_rx_queue_intr_disable, .udp_tunnel_port_add = nfp_udp_tunnel_port_add, @@ -491,18 +472,15 @@ nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, static int nfp_net_init(struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev; + int err; + uint16_t port; + uint64_t rx_base; + uint64_t tx_base; + struct nfp_net_hw *hw; struct nfp_pf_dev *pf_dev; + struct rte_pci_device *pci_dev; struct nfp_app_fw_nic *app_fw_nic; - struct nfp_net_hw *hw; struct rte_ether_addr *tmp_ether_addr; - uint64_t rx_bar_off = 0; - uint64_t tx_bar_off = 0; - uint32_t start_q; - int port = 0; - int err; - - PMD_INIT_FUNC_TRACE(); pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); @@ -513,27 +491,25 @@ nfp_net_init(struct rte_eth_dev *eth_dev) app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx; - if (port < 0 || port > 7) { + if (port > 7) { PMD_DRV_LOG(ERR, "Port value is wrong"); return -ENODEV; } /* * Use PF array of physical ports to get pointer to - * this specific port + * this specific port. */ hw = app_fw_nic->ports[port]; - PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, " + PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, " "NFP internal port number: %d", port, hw->nfp_idx); rte_eth_copy_pci_info(eth_dev, pci_dev); - hw->ctrl_bar = pci_dev->mem_resource[0].addr; if (hw->ctrl_bar == NULL) { - PMD_DRV_LOG(ERR, - "hw->ctrl_bar is NULL. BAR0 not configured"); + PMD_DRV_LOG(ERR, "hw->ctrl_bar is NULL. BAR0 not configured"); return -ENODEV; } @@ -548,13 +524,16 @@ nfp_net_init(struct rte_eth_dev *eth_dev) PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar"); return -EIO; } + hw->mac_stats = hw->mac_stats_bar; } else { if (pf_dev->ctrl_bar == NULL) return -ENODEV; + /* Use port offset in pf ctrl_bar for this ports control bar */ - hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_PF_CSR_SLICE_SIZE); - hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar + (port * NFP_MAC_STATS_SIZE); + hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ); + hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar + + (hw->nfp_idx * NFP_MAC_STATS_SIZE); } PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); @@ -564,6 +543,18 @@ nfp_net_init(struct rte_eth_dev *eth_dev) if (err != 0) return err; + err = nfp_net_tlv_caps_parse(eth_dev); + if (err != 0) { + PMD_INIT_LOG(ERR, "Failed to parser TLV caps"); + return err; + } + + err = nfp_ipsec_init(eth_dev); + if (err != 0) { + PMD_INIT_LOG(ERR, "Failed to init IPsec module"); + return err; + } + nfp_net_ethdev_ops_mount(hw, eth_dev); hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) * @@ -574,37 +565,22 @@ nfp_net_init(struct rte_eth_dev *eth_dev) return -ENOMEM; } - /* Work out where in the BAR the queues start. 
*/ - switch (pci_dev->id.device_id) { - case PCI_DEVICE_ID_NFP3800_PF_NIC: - case PCI_DEVICE_ID_NFP4000_PF_NIC: - case PCI_DEVICE_ID_NFP6000_PF_NIC: - start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); - tx_bar_off = nfp_pci_queue(pci_dev, start_q); - start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); - rx_bar_off = nfp_pci_queue(pci_dev, start_q); - break; - default: - PMD_DRV_LOG(ERR, "nfp_net: no device ID matching"); - return -ENODEV; - } + tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); + rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); - PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off); - PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off); - - hw->tx_bar = pf_dev->hw_queues + tx_bar_off; - hw->rx_bar = pf_dev->hw_queues + rx_bar_off; + hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ; + hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ; eth_dev->data->dev_private = hw; PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p", - hw->ctrl_bar, hw->tx_bar, hw->rx_bar); + hw->ctrl_bar, hw->tx_bar, hw->rx_bar); nfp_net_cfg_queue_setup(hw); hw->mtu = RTE_ETHER_MTU; /* VLAN insertion is incompatible with LSOv2 */ - if (hw->cap & NFP_NET_CFG_CTRL_LSO2) + if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0) hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN; nfp_net_log_device_information(hw); @@ -613,8 +589,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) rte_spinlock_init(&hw->reconfig_lock); /* Allocating memory for mac addr */ - eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", - RTE_ETHER_ADDR_LEN, 0); + eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, "Failed to space for MAC address"); return -ENOMEM; @@ -624,7 +599,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]); tmp_ether_addr = &hw->mac_addr; - if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) { + if (rte_is_valid_assigned_ether_addr(tmp_ether_addr) == 0) { PMD_INIT_LOG(INFO, "Using random mac address for port %d", port); /* Using random mac addresses for VFs */ rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]); @@ -639,11 +614,11 @@ nfp_net_init(struct rte_eth_dev *eth_dev) eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; - PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x " - "mac=" RTE_ETHER_ADDR_PRT_FMT, - eth_dev->data->port_id, pci_dev->id.vendor_id, - pci_dev->id.device_id, - RTE_ETHER_ADDR_BYTES(&hw->mac_addr)); + PMD_INIT_LOG(INFO, "port %d VendorID=%#x DeviceID=%#x " + "mac=" RTE_ETHER_ADDR_PRT_FMT, + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id, + RTE_ETHER_ADDR_BYTES(&hw->mac_addr)); /* Registering LSC interrupt handler */ rte_intr_callback_register(pci_dev->intr_handle, @@ -659,29 +634,38 @@ nfp_net_init(struct rte_eth_dev *eth_dev) #define DEFAULT_FW_PATH "/lib/firmware/netronome" static int -nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) +nfp_fw_upload(struct rte_pci_device *dev, + struct nfp_nsp *nsp, + char *card) { - struct nfp_cpp *cpp = nsp->cpp; void *fw_buf; - char fw_name[125]; - char serial[40]; size_t fsize; + char serial[40]; + char fw_name[125]; + uint16_t interface; + uint32_t cpp_serial_len; + const uint8_t *cpp_serial; + struct nfp_cpp *cpp = nfp_nsp_cpp(nsp); + + cpp_serial_len = nfp_cpp_serial(cpp, &cpp_serial); + if (cpp_serial_len != NFP_SERIAL_LEN) + return -ERANGE; + + interface = nfp_cpp_interface(cpp); /* Looking for firmware file in order of priority */ /* 
First try to find a firmware image specific for this device */ snprintf(serial, sizeof(serial), "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", - cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3], - cpp->serial[4], cpp->serial[5], cpp->interface >> 8, - cpp->interface & 0xff); - - snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH, - serial); + cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3], + cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff); + snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH, serial); PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) goto load_fw; + /* Then try the PCI name */ snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, dev->name); @@ -693,14 +677,15 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) /* Finally try the card type and media */ snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card); PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); - if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) { - PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name); - return -ENOENT; - } + if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) + goto load_fw; + + PMD_DRV_LOG(ERR, "Can't find suitable firmware."); + return -ENOENT; load_fw: PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu", - fw_name, fsize); + fw_name, fsize); PMD_DRV_LOG(INFO, "Uploading the firmware ..."); nfp_nsp_load_fw(nsp, fw_buf, fsize); PMD_DRV_LOG(INFO, "Done"); @@ -716,16 +701,16 @@ nfp_fw_setup(struct rte_pci_device *dev, struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo) { + int err; + char card_desc[100]; struct nfp_nsp *nsp; const char *nfp_fw_model; - char card_desc[100]; - int err = 0; nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno"); if (nfp_fw_model == NULL) nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno"); - if (nfp_fw_model) { + if (nfp_fw_model != NULL) { PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model); } else { PMD_DRV_LOG(ERR, "firmware model NOT found"); @@ -734,7 +719,7 @@ nfp_fw_setup(struct rte_pci_device *dev, if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) { PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u", - nfp_eth_table->count); + nfp_eth_table->count); return -EIO; } @@ -761,12 +746,12 @@ nfp_fw_setup(struct rte_pci_device *dev, } static int -nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev) +nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev, + const struct nfp_dev_info *dev_info) { - int i; - int ret; - int err = 0; - int total_vnics; + uint8_t i; + int ret = 0; + uint32_t total_vnics; struct nfp_net_hw *hw; unsigned int numa_node; struct rte_eth_dev *eth_dev; @@ -786,8 +771,8 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev) pf_dev->app_fw_priv = app_fw_nic; /* Read the number of vNIC's created for the PF */ - total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_cfg_pf0_num_ports", &err); - if (err != 0 || total_vnics <= 0 || total_vnics > 8) { + total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_cfg_pf0_num_ports", &ret); + if (ret != 0 || total_vnics == 0 || total_vnics > 8) { PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value"); ret = -ENODEV; goto app_cleanup; @@ -795,15 +780,15 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev) /* * For coreNIC the number of vNICs exposed should be the same as the - * number of physical ports + * number of physical ports. 
*/ - if (total_vnics != (int)nfp_eth_table->count) { + if (total_vnics != nfp_eth_table->count) { PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs"); ret = -ENODEV; goto app_cleanup; } - /* Populate coreNIC app properties*/ + /* Populate coreNIC app properties */ app_fw_nic->total_phyports = total_vnics; app_fw_nic->pf_dev = pf_dev; if (total_vnics > 1) @@ -825,7 +810,7 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev) numa_node = rte_socket_id(); for (i = 0; i < app_fw_nic->total_phyports; i++) { snprintf(port_name, sizeof(port_name), "%s_port%d", - pf_dev->pci_dev->device.name, i); + pf_dev->pci_dev->device.name, i); /* Allocate a eth_dev for this phyport */ eth_dev = rte_eth_dev_allocate(port_name); @@ -835,8 +820,8 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev) } /* Allocate memory for this phyport */ - eth_dev->data->dev_private = - rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw), + eth_dev->data->dev_private = rte_zmalloc_socket(port_name, + sizeof(struct nfp_net_hw), RTE_CACHE_LINE_SIZE, numa_node); if (eth_dev->data->dev_private == NULL) { ret = -ENOMEM; @@ -849,6 +834,7 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev) /* Add this device to the PF's array of physical ports */ app_fw_nic->ports[i] = hw; + hw->dev_info = dev_info; hw->pf_dev = pf_dev; hw->cpp = pf_dev->cpp; hw->eth_dev = eth_dev; @@ -857,11 +843,12 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev) eth_dev->device = &pf_dev->pci_dev->device; - /* ctrl/tx/rx BAR mappings and remaining init happens in - * nfp_net_init + /* + * Ctrl/tx/rx BAR mappings and remaining init happens in + * @nfp_net_init() */ ret = nfp_net_init(eth_dev); - if (ret) { + if (ret != 0) { ret = -ENODEV; goto port_cleanup; } @@ -874,9 +861,11 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev) port_cleanup: for (i = 0; i < app_fw_nic->total_phyports; i++) { - if (app_fw_nic->ports[i] && app_fw_nic->ports[i]->eth_dev) { + if (app_fw_nic->ports[i] != NULL && + app_fw_nic->ports[i]->eth_dev != NULL) { struct rte_eth_dev *tmp_dev; tmp_dev = app_fw_nic->ports[i]->eth_dev; + nfp_ipsec_uninit(tmp_dev); rte_eth_dev_release_port(tmp_dev); app_fw_nic->ports[i] = NULL; } @@ -891,21 +880,27 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev) static int nfp_pf_init(struct rte_pci_device *pci_dev) { - int ret; - int err = 0; + int ret = 0; uint64_t addr; uint32_t cpp_id; struct nfp_cpp *cpp; - enum nfp_app_fw_id app_fw_id; struct nfp_pf_dev *pf_dev; struct nfp_hwinfo *hwinfo; + enum nfp_app_fw_id app_fw_id; char name[RTE_ETH_NAME_MAX_LEN]; struct nfp_rtsym_table *sym_tbl; struct nfp_eth_table *nfp_eth_table; + const struct nfp_dev_info *dev_info; if (pci_dev == NULL) return -ENODEV; + dev_info = nfp_dev_info_get(pci_dev->id.device_id); + if (dev_info == NULL) { + PMD_INIT_LOG(ERR, "Not supported device ID"); + return -ENODEV; + } + /* * When device bound to UIO, the device could be used, by mistake, * by two DPDK apps, and the UIO driver does not avoid it. This @@ -914,9 +909,9 @@ nfp_pf_init(struct rte_pci_device *pci_dev) * use a lock file if UIO is being used. 
*/ if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) - cpp = nfp_cpp_from_device_name(pci_dev, 0); + cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false); else - cpp = nfp_cpp_from_device_name(pci_dev, 1); + cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); if (cpp == NULL) { PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); @@ -938,7 +933,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev) goto hwinfo_cleanup; } - if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) { + if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo) != 0) { PMD_INIT_LOG(ERR, "Error when uploading firmware"); ret = -EIO; goto eth_table_cleanup; @@ -947,15 +942,14 @@ nfp_pf_init(struct rte_pci_device *pci_dev) /* Now the symbol table should be there */ sym_tbl = nfp_rtsym_table_read(cpp); if (sym_tbl == NULL) { - PMD_INIT_LOG(ERR, "Something is wrong with the firmware" - " symbol table"); + PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); ret = -EIO; goto eth_table_cleanup; } /* Read the app ID of the firmware loaded */ - app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err); - if (err != 0) { + app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &ret); + if (ret != 0) { PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw"); ret = -EIO; goto sym_tbl_cleanup; @@ -977,43 +971,28 @@ nfp_pf_init(struct rte_pci_device *pci_dev) pf_dev->pci_dev = pci_dev; pf_dev->nfp_eth_table = nfp_eth_table; - /* configure access to tx/rx vNIC BARs */ - switch (pci_dev->id.device_id) { - case PCI_DEVICE_ID_NFP3800_PF_NIC: - addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP3800_OFFSET, - 0, NFP_PCIE_QUEUE_NFP3800_MASK); - break; - case PCI_DEVICE_ID_NFP4000_PF_NIC: - case PCI_DEVICE_ID_NFP6000_PF_NIC: - addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP6000_OFFSET, - 0, NFP_PCIE_QUEUE_NFP6000_MASK); - break; - default: - PMD_INIT_LOG(ERR, "nfp_net: no device ID matching"); - ret = -ENODEV; - goto pf_cleanup; - } - + /* Configure access to tx/rx vNIC BARs */ + addr = nfp_qcp_queue_offset(dev_info, 0); cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); - pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, cpp_id, - addr, NFP_QCP_QUEUE_AREA_SZ, - &pf_dev->hwqueues_area); - if (pf_dev->hw_queues == NULL) { + + pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id, + addr, dev_info->qc_area_sz, &pf_dev->qc_area); + if (pf_dev->qc_bar == NULL) { PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc"); ret = -EIO; goto pf_cleanup; } - PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues); + PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar); /* * PF initialization has been done at this point. Call app specific - * init code now + * init code now. 
*/ switch (pf_dev->app_fw_id) { case NFP_APP_FW_CORE_NIC: PMD_INIT_LOG(INFO, "Initializing coreNIC"); - ret = nfp_init_app_fw_nic(pf_dev); + ret = nfp_init_app_fw_nic(pf_dev, dev_info); if (ret != 0) { PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); goto hwqueues_cleanup; @@ -1021,7 +1000,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev) break; case NFP_APP_FW_FLOWER_NIC: PMD_INIT_LOG(INFO, "Initializing Flower"); - ret = nfp_init_app_fw_flower(pf_dev); + ret = nfp_init_app_fw_flower(pf_dev, dev_info); if (ret != 0) { PMD_INIT_LOG(ERR, "Could not initialize Flower!"); goto hwqueues_cleanup; @@ -1033,7 +1012,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev) goto hwqueues_cleanup; } - /* register the CPP bridge service here for primary use */ + /* Register the CPP bridge service here for primary use */ ret = nfp_enable_cpp_service(pf_dev); if (ret != 0) PMD_INIT_LOG(INFO, "Enable cpp service failed."); @@ -1041,7 +1020,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev) return 0; hwqueues_cleanup: - nfp_cpp_area_free(pf_dev->hwqueues_area); + nfp_cpp_area_free(pf_dev->qc_area); pf_cleanup: rte_free(pf_dev); sym_tbl_cleanup: @@ -1061,15 +1040,15 @@ nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev, struct nfp_rtsym_table *sym_tbl, struct nfp_cpp *cpp) { - int i; + uint32_t i; int err = 0; int ret = 0; - int total_vnics; + uint32_t total_vnics; struct nfp_net_hw *hw; /* Read the number of vNIC's created for the PF */ total_vnics = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err); - if (err != 0 || total_vnics <= 0 || total_vnics > 8) { + if (err != 0 || total_vnics == 0 || total_vnics > 8) { PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value"); return -ENODEV; } @@ -1077,7 +1056,7 @@ nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev, for (i = 0; i < total_vnics; i++) { struct rte_eth_dev *eth_dev; char port_name[RTE_ETH_NAME_MAX_LEN]; - snprintf(port_name, sizeof(port_name), "%s_port%d", + snprintf(port_name, sizeof(port_name), "%s_port%u", pci_dev->device.name, i); PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name); @@ -1101,20 +1080,26 @@ nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev, /* * When attaching to the NFP4000/6000 PF on a secondary process there * is no need to initialise the PF again. Only minimal work is required - * here + * here. */ static int nfp_pf_secondary_init(struct rte_pci_device *pci_dev) { - int err = 0; int ret = 0; struct nfp_cpp *cpp; enum nfp_app_fw_id app_fw_id; struct nfp_rtsym_table *sym_tbl; + const struct nfp_dev_info *dev_info; if (pci_dev == NULL) return -ENODEV; + dev_info = nfp_dev_info_get(pci_dev->id.device_id); + if (dev_info == NULL) { + PMD_INIT_LOG(ERR, "Not supported device ID"); + return -ENODEV; + } + /* * When device bound to UIO, the device could be used, by mistake, * by two DPDK apps, and the UIO driver does not avoid it. This @@ -1123,9 +1108,9 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev) * use a lock file if UIO is being used. 
*/ if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) - cpp = nfp_cpp_from_device_name(pci_dev, 0); + cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false); else - cpp = nfp_cpp_from_device_name(pci_dev, 1); + cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); if (cpp == NULL) { PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); @@ -1134,18 +1119,17 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev) /* * We don't have access to the PF created in the primary process - * here so we have to read the number of ports from firmware + * here so we have to read the number of ports from firmware. */ sym_tbl = nfp_rtsym_table_read(cpp); if (sym_tbl == NULL) { - PMD_INIT_LOG(ERR, "Something is wrong with the firmware" - " symbol table"); + PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); return -EIO; } /* Read the app ID of the firmware loaded */ - app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err); - if (err != 0) { + app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &ret); + if (ret != 0) { PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw"); goto sym_tbl_cleanup; } @@ -1192,27 +1176,27 @@ nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, static const struct rte_pci_id pci_id_nfp_pf_net_map[] = { { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, - PCI_DEVICE_ID_NFP3800_PF_NIC) + PCI_DEVICE_ID_NFP3800_PF_NIC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, - PCI_DEVICE_ID_NFP4000_PF_NIC) + PCI_DEVICE_ID_NFP4000_PF_NIC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, - PCI_DEVICE_ID_NFP6000_PF_NIC) + PCI_DEVICE_ID_NFP6000_PF_NIC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, - PCI_DEVICE_ID_NFP3800_PF_NIC) + PCI_DEVICE_ID_NFP3800_PF_NIC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, - PCI_DEVICE_ID_NFP4000_PF_NIC) + PCI_DEVICE_ID_NFP4000_PF_NIC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, - PCI_DEVICE_ID_NFP6000_PF_NIC) + PCI_DEVICE_ID_NFP6000_PF_NIC) }, { .vendor_id = 0, @@ -1222,8 +1206,8 @@ static const struct rte_pci_id pci_id_nfp_pf_net_map[] = { static int nfp_pci_uninit(struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev; uint16_t port_id; + struct rte_pci_device *pci_dev; pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); @@ -1232,7 +1216,7 @@ nfp_pci_uninit(struct rte_eth_dev *eth_dev) rte_eth_dev_close(port_id); /* * Ports can be closed and freed but hotplugging is not - * currently supported + * currently supported. 
*/ return -ENOTSUP; } diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c index eaf815d06da..3db35b90e8f 100644 --- a/drivers/net/nfp/nfp_ethdev_vf.c +++ b/drivers/net/nfp/nfp_ethdev_vf.c @@ -7,15 +7,12 @@ #include -#include "nfpcore/nfp_mip.h" -#include "nfpcore/nfp_rtsym.h" +#include "nfd3/nfp_nfd3.h" +#include "nfdk/nfp_nfdk.h" +#include "nfpcore/nfp_cpp.h" #include "nfp_common.h" -#include "nfp_ctrl.h" -#include "nfp_rxtx.h" #include "nfp_logs.h" -#include "nfd3/nfp_nfd3.h" -#include "nfdk/nfp_nfdk.h" static void nfp_netvf_read_mac(struct nfp_net_hw *hw) @@ -32,18 +29,16 @@ nfp_netvf_read_mac(struct nfp_net_hw *hw) static int nfp_netvf_start(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = pci_dev->intr_handle; - uint32_t new_ctrl, update = 0; + int ret; + uint16_t i; + uint32_t new_ctrl; + uint32_t update = 0; + uint32_t intr_vector; struct nfp_net_hw *hw; struct rte_eth_conf *dev_conf; struct rte_eth_rxmode *rxmode; - uint32_t intr_vector; - int ret; - - hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - - PMD_INIT_LOG(DEBUG, "Start"); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; /* Disabling queues just in case... */ nfp_net_disable_queues(dev); @@ -51,25 +46,25 @@ nfp_netvf_start(struct rte_eth_dev *dev) /* Enabling the required queues in the device */ nfp_net_enable_queues(dev); - /* check and configure queue intr-vector mapping */ + /* Check and configure queue intr-vector mapping */ if (dev->data->dev_conf.intr_conf.rxq != 0) { - if (rte_intr_type_get(intr_handle) == - RTE_INTR_HANDLE_UIO) { + if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) { /* * Better not to share LSC with RX interrupts. - * Unregistering LSC interrupt handler + * Unregistering LSC interrupt handler. */ - rte_intr_callback_unregister(pci_dev->intr_handle, - nfp_net_dev_interrupt_handler, (void *)dev); + rte_intr_callback_unregister(intr_handle, + nfp_net_dev_interrupt_handler, (void *)dev); if (dev->data->nb_rx_queues > 1) { PMD_INIT_LOG(ERR, "PMD rx interrupt only " - "supports 1 queue with UIO"); + "supports 1 queue with UIO"); return -EIO; } } + intr_vector = dev->data->nb_rx_queues; - if (rte_intr_efd_enable(intr_handle, intr_vector)) + if (rte_intr_efd_enable(intr_handle, intr_vector) != 0) return -1; nfp_configure_rx_interrupt(dev, intr_handle); @@ -81,12 +76,13 @@ nfp_netvf_start(struct rte_eth_dev *dev) new_ctrl = nfp_check_offloads(dev); /* Writing configuration parameters in the device */ + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); nfp_net_params_setup(hw); dev_conf = &dev->data->dev_conf; rxmode = &dev_conf->rxmode; - if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) { + if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) { nfp_net_rss_config_default(dev); update |= NFP_NET_CFG_UPDATE_RSS; new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap); @@ -97,24 +93,29 @@ nfp_netvf_start(struct rte_eth_dev *dev) update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; - if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG) + if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0) new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl); - if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + if (nfp_net_reconfig(hw, new_ctrl, update) != 0) return -EIO; /* * Allocating rte mbufs for configured rx queues. - * This requires queues being enabled before + * This requires queues being enabled before. 
*/ - if (nfp_net_rx_freelist_setup(dev) < 0) { + if (nfp_net_rx_freelist_setup(dev) != 0) { ret = -ENOMEM; goto error; } hw->ctrl = new_ctrl; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; error: @@ -139,8 +140,6 @@ nfp_netvf_start(struct rte_eth_dev *dev) static int nfp_netvf_stop(struct rte_eth_dev *dev) { - PMD_INIT_LOG(DEBUG, "Stop"); - nfp_net_disable_queues(dev); /* Clear queues */ @@ -173,72 +172,61 @@ nfp_netvf_close(struct rte_eth_dev *dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - PMD_INIT_LOG(DEBUG, "Close"); - pci_dev = RTE_ETH_DEV_TO_PCI(dev); /* * We assume that the DPDK application is stopping all the * threads/queues before calling the device close function. */ - nfp_net_disable_queues(dev); /* Clear queues */ nfp_net_close_tx_queue(dev); - nfp_net_close_rx_queue(dev); rte_intr_disable(pci_dev->intr_handle); - /* unregister callback func from eal lib */ + /* Unregister callback func from eal lib */ rte_intr_callback_unregister(pci_dev->intr_handle, - nfp_net_dev_interrupt_handler, - (void *)dev); + nfp_net_dev_interrupt_handler, (void *)dev); - /* Cancel possible impending LSC work here before releasing the port*/ - rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, - (void *)dev); - - /* - * The ixgbe PMD disables the pcie master on the - * device. The i40e does not... - */ + /* Cancel possible impending LSC work here before releasing the port */ + rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); return 0; } /* Initialise and register VF driver with DPDK Application */ static const struct eth_dev_ops nfp_netvf_eth_dev_ops = { - .dev_configure = nfp_net_configure, - .dev_start = nfp_netvf_start, - .dev_stop = nfp_netvf_stop, - .dev_set_link_up = nfp_netvf_set_link_up, - .dev_set_link_down = nfp_netvf_set_link_down, - .dev_close = nfp_netvf_close, - .promiscuous_enable = nfp_net_promisc_enable, - .promiscuous_disable = nfp_net_promisc_disable, - .link_update = nfp_net_link_update, - .stats_get = nfp_net_stats_get, - .stats_reset = nfp_net_stats_reset, + .dev_configure = nfp_net_configure, + .dev_start = nfp_netvf_start, + .dev_stop = nfp_netvf_stop, + .dev_set_link_up = nfp_netvf_set_link_up, + .dev_set_link_down = nfp_netvf_set_link_down, + .dev_close = nfp_netvf_close, + .promiscuous_enable = nfp_net_promisc_enable, + .promiscuous_disable = nfp_net_promisc_disable, + .link_update = nfp_net_link_update, + .stats_get = nfp_net_stats_get, + .stats_reset = nfp_net_stats_reset, .xstats_get = nfp_net_xstats_get, .xstats_reset = nfp_net_xstats_reset, .xstats_get_names = nfp_net_xstats_get_names, .xstats_get_by_id = nfp_net_xstats_get_by_id, .xstats_get_names_by_id = nfp_net_xstats_get_names_by_id, - .dev_infos_get = nfp_net_infos_get, + .dev_infos_get = nfp_net_infos_get, .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, - .mtu_set = nfp_net_dev_mtu_set, - .mac_addr_set = nfp_net_set_mac_addr, - .vlan_offload_set = nfp_net_vlan_offload_set, - .reta_update = nfp_net_reta_update, - .reta_query = nfp_net_reta_query, - .rss_hash_update = nfp_net_rss_hash_update, - .rss_hash_conf_get = nfp_net_rss_hash_conf_get, - .rx_queue_setup = nfp_net_rx_queue_setup, - .rx_queue_release = nfp_net_rx_queue_release, - .tx_queue_setup = nfp_net_tx_queue_setup, - .tx_queue_release = nfp_net_tx_queue_release, + .mtu_set = nfp_net_dev_mtu_set, + .mac_addr_set = 
nfp_net_set_mac_addr, + .vlan_offload_set = nfp_net_vlan_offload_set, + .reta_update = nfp_net_reta_update, + .reta_query = nfp_net_reta_query, + .rss_hash_update = nfp_net_rss_hash_update, + .rss_hash_conf_get = nfp_net_rss_hash_conf_get, + .rx_queue_setup = nfp_net_rx_queue_setup, + .rx_queue_release = nfp_net_rx_queue_release, + .tx_queue_setup = nfp_net_tx_queue_setup, + .tx_queue_release = nfp_net_tx_queue_release, .rx_queue_intr_enable = nfp_rx_queue_intr_enable, .rx_queue_intr_disable = nfp_rx_queue_intr_disable, }; @@ -260,25 +248,30 @@ nfp_netvf_ethdev_ops_mount(struct nfp_net_hw *hw, static int nfp_netvf_init(struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev; - struct nfp_net_hw *hw; - struct rte_ether_addr *tmp_ether_addr; - - uint64_t tx_bar_off = 0, rx_bar_off = 0; - uint32_t start_q; - int port = 0; int err; + uint16_t port; + uint32_t start_q; + struct nfp_net_hw *hw; + uint64_t tx_bar_off = 0; + uint64_t rx_bar_off = 0; + struct rte_pci_device *pci_dev; + const struct nfp_dev_info *dev_info; - PMD_INIT_FUNC_TRACE(); - + port = eth_dev->data->port_id; pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + dev_info = nfp_dev_info_get(pci_dev->id.device_id); + if (dev_info == NULL) { + PMD_INIT_LOG(ERR, "Not supported device ID"); + return -ENODEV; + } + hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + hw->dev_info = dev_info; hw->ctrl_bar = pci_dev->mem_resource[0].addr; if (hw->ctrl_bar == NULL) { - PMD_DRV_LOG(ERR, - "hw->ctrl_bar is NULL. BAR0 not configured"); + PMD_DRV_LOG(ERR, "hw->ctrl_bar is NULL. BAR0 not configured"); return -ENODEV; } @@ -296,45 +289,31 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) rte_eth_copy_pci_info(eth_dev, pci_dev); - hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) * - nfp_net_xstats_size(eth_dev), 0); + hw->eth_xstats_base = rte_malloc("rte_eth_xstat", + sizeof(struct rte_eth_xstat) * nfp_net_xstats_size(eth_dev), 0); if (hw->eth_xstats_base == NULL) { - PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!", + PMD_INIT_LOG(ERR, "No memory for xstats base values on device %s!", pci_dev->device.name); return -ENOMEM; } /* Work out where in the BAR the queues start. 
*/ - switch (pci_dev->id.device_id) { - case PCI_DEVICE_ID_NFP3800_VF_NIC: - case PCI_DEVICE_ID_NFP6000_VF_NIC: - start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); - tx_bar_off = nfp_pci_queue(pci_dev, start_q); - start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); - rx_bar_off = nfp_pci_queue(pci_dev, start_q); - break; - default: - PMD_DRV_LOG(ERR, "nfp_net: no device ID matching"); - err = -ENODEV; - goto dev_err_ctrl_map; - } + start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); + tx_bar_off = nfp_qcp_queue_offset(dev_info, start_q); + start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); + rx_bar_off = nfp_qcp_queue_offset(dev_info, start_q); - PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off); - PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off); - - hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + - tx_bar_off; - hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + - rx_bar_off; + hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off; + hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off; PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p", - hw->ctrl_bar, hw->tx_bar, hw->rx_bar); + hw->ctrl_bar, hw->tx_bar, hw->rx_bar); nfp_net_cfg_queue_setup(hw); hw->mtu = RTE_ETHER_MTU; /* VLAN insertion is incompatible with LSOv2 */ - if (hw->cap & NFP_NET_CFG_CTRL_LSO2) + if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0) hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN; nfp_net_log_device_information(hw); @@ -343,8 +322,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) rte_spinlock_init(&hw->reconfig_lock); /* Allocating memory for mac addr */ - eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", - RTE_ETHER_ADDR_LEN, 0); + eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, "Failed to space for MAC address"); err = -ENOMEM; @@ -352,11 +330,8 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) } nfp_netvf_read_mac(hw); - - tmp_ether_addr = &hw->mac_addr; - if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) { - PMD_INIT_LOG(INFO, "Using random mac address for port %d", - port); + if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) { + PMD_INIT_LOG(INFO, "Using random mac address for port %hu", port); /* Using random mac addresses for VFs */ rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]); nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]); @@ -370,17 +345,16 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; - PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x " - "mac=" RTE_ETHER_ADDR_PRT_FMT, - eth_dev->data->port_id, pci_dev->id.vendor_id, - pci_dev->id.device_id, - RTE_ETHER_ADDR_BYTES(&hw->mac_addr)); + PMD_INIT_LOG(INFO, "port %hu VendorID=%#x DeviceID=%#x " + "mac=" RTE_ETHER_ADDR_PRT_FMT, + port, pci_dev->id.vendor_id, + pci_dev->id.device_id, + RTE_ETHER_ADDR_BYTES(&hw->mac_addr)); if (rte_eal_process_type() == RTE_PROC_PRIMARY) { /* Registering LSC interrupt handler */ rte_intr_callback_register(pci_dev->intr_handle, - nfp_net_dev_interrupt_handler, - (void *)eth_dev); + nfp_net_dev_interrupt_handler, (void *)eth_dev); /* Telling the firmware about the LSC interrupt entry */ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); /* Recording current stats counters values */ @@ -398,39 +372,42 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) static const struct rte_pci_id pci_id_nfp_vf_net_map[] = { { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, - PCI_DEVICE_ID_NFP3800_VF_NIC) + 
PCI_DEVICE_ID_NFP3800_VF_NIC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, - PCI_DEVICE_ID_NFP6000_VF_NIC) + PCI_DEVICE_ID_NFP6000_VF_NIC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, - PCI_DEVICE_ID_NFP3800_VF_NIC) + PCI_DEVICE_ID_NFP3800_VF_NIC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, - PCI_DEVICE_ID_NFP6000_VF_NIC) + PCI_DEVICE_ID_NFP6000_VF_NIC) }, { .vendor_id = 0, }, }; -static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev) +static int +nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev) { /* VF cleanup, just free private port data */ return nfp_netvf_close(eth_dev); } -static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, - struct rte_pci_device *pci_dev) +static int +nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) { return rte_eth_dev_pci_generic_probe(pci_dev, - sizeof(struct nfp_net_adapter), nfp_netvf_init); + sizeof(struct nfp_net_adapter), nfp_netvf_init); } -static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev) +static int +nfp_vf_pci_remove(struct rte_pci_device *pci_dev) { return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit); } @@ -438,8 +415,8 @@ static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev) static struct rte_pci_driver rte_nfp_net_vf_pmd = { .id_table = pci_id_nfp_vf_net_map, .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, - .probe = eth_nfp_vf_pci_probe, - .remove = eth_nfp_vf_pci_remove, + .probe = nfp_vf_pci_probe, + .remove = nfp_vf_pci_remove, }; RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd); diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c index 3ed78eab570..1bf31146fcd 100644 --- a/drivers/net/nfp/nfp_flow.c +++ b/drivers/net/nfp/nfp_flow.c @@ -3,34 +3,92 @@ * All rights reserved. */ +#include "nfp_flow.h" + #include #include #include -#include #include -#include "nfp_common.h" -#include "nfp_ctrl.h" -#include "nfp_flow.h" -#include "nfp_logs.h" -#include "nfp_rxtx.h" -#include "flower/nfp_flower.h" -#include "flower/nfp_flower_cmsg.h" -#include "flower/nfp_flower_ctrl.h" +#include "flower/nfp_conntrack.h" #include "flower/nfp_flower_representor.h" -#include "nfpcore/nfp_mip.h" #include "nfpcore/nfp_rtsym.h" - -/* - * Maximum number of items in struct rte_flow_action_vxlan_encap. 
- * ETH / IPv4(6) / UDP / VXLAN / END - */ -#define ACTION_VXLAN_ENCAP_ITEMS_NUM 5 - -struct vxlan_data { - struct rte_flow_action_vxlan_encap conf; - struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM]; -}; +#include "nfp_logs.h" +#include "nfp_mtr.h" + +#define NFP_FLOWER_LAYER_EXT_META RTE_BIT32(0) +#define NFP_FLOWER_LAYER_PORT RTE_BIT32(1) +#define NFP_FLOWER_LAYER_MAC RTE_BIT32(2) +#define NFP_FLOWER_LAYER_TP RTE_BIT32(3) +#define NFP_FLOWER_LAYER_IPV4 RTE_BIT32(4) +#define NFP_FLOWER_LAYER_IPV6 RTE_BIT32(5) +#define NFP_FLOWER_LAYER_CT RTE_BIT32(6) +#define NFP_FLOWER_LAYER_VXLAN RTE_BIT32(7) + +#define NFP_FLOWER_LAYER2_GRE RTE_BIT32(0) +#define NFP_FLOWER_LAYER2_QINQ RTE_BIT32(4) +#define NFP_FLOWER_LAYER2_GENEVE RTE_BIT32(5) +#define NFP_FLOWER_LAYER2_GENEVE_OP RTE_BIT32(6) +#define NFP_FLOWER_LAYER2_TUN_IPV6 RTE_BIT32(7) + +/* Compressed HW representation of TCP Flags */ +#define NFP_FL_TCP_FLAG_FIN RTE_BIT32(0) +#define NFP_FL_TCP_FLAG_SYN RTE_BIT32(1) +#define NFP_FL_TCP_FLAG_RST RTE_BIT32(2) +#define NFP_FL_TCP_FLAG_PSH RTE_BIT32(3) +#define NFP_FL_TCP_FLAG_URG RTE_BIT32(4) + +#define NFP_FL_META_FLAG_MANAGE_MASK RTE_BIT32(7) + +#define NFP_FLOWER_MASK_VLAN_CFI RTE_BIT32(12) + +#define NFP_MASK_TABLE_ENTRIES 1024 + +/* The maximum action list size (in bytes) supported by the NFP. */ +#define NFP_FL_MAX_A_SIZ 1216 + +#define NFP_FL_SC_ACT_DROP 0x80000000 +#define NFP_FL_SC_ACT_USER 0x7D000000 +#define NFP_FL_SC_ACT_POPV 0x6A000000 +#define NFP_FL_SC_ACT_NULL 0x00000000 + +/* GRE Tunnel flags */ +#define NFP_FL_GRE_FLAG_KEY (1 << 2) + +/* Action opcodes */ +#define NFP_FL_ACTION_OPCODE_OUTPUT 0 +#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1 +#define NFP_FL_ACTION_OPCODE_POP_VLAN 2 +#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3 +#define NFP_FL_ACTION_OPCODE_POP_MPLS 4 +#define NFP_FL_ACTION_OPCODE_USERSPACE 5 +#define NFP_FL_ACTION_OPCODE_SET_TUNNEL 6 +#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7 +#define NFP_FL_ACTION_OPCODE_SET_MPLS 8 +#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9 +#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10 +#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11 +#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12 +#define NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL 13 +#define NFP_FL_ACTION_OPCODE_SET_UDP 14 +#define NFP_FL_ACTION_OPCODE_SET_TCP 15 +#define NFP_FL_ACTION_OPCODE_PRE_LAG 16 +#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17 +#define NFP_FL_ACTION_OPCODE_PRE_GS 18 +#define NFP_FL_ACTION_OPCODE_GS 19 +#define NFP_FL_ACTION_OPCODE_PUSH_NSH 20 +#define NFP_FL_ACTION_OPCODE_POP_NSH 21 +#define NFP_FL_ACTION_OPCODE_SET_QUEUE 22 +#define NFP_FL_ACTION_OPCODE_CONNTRACK 23 +#define NFP_FL_ACTION_OPCODE_METER 24 +#define NFP_FL_ACTION_OPCODE_CT_NAT_EXT 25 +#define NFP_FL_ACTION_OPCODE_PUSH_GENEVE 26 +#define NFP_FL_ACTION_OPCODE_NUM 32 + +#define NFP_FL_OUT_FLAGS_LAST RTE_BIT32(15) + +/* Tunnel ports */ +#define NFP_FL_PORT_TYPE_TUN 0x50000000 /* Static initializer for a list of subsequent item types */ #define NEXT_ITEM(...) \ @@ -50,21 +108,21 @@ struct vxlan_data { #define NVGRE_V4_LEN (sizeof(struct rte_ether_hdr) + \ sizeof(struct rte_ipv4_hdr) + \ sizeof(struct rte_flow_item_gre) + \ - sizeof(rte_be32_t)) /* gre key */ + sizeof(rte_be32_t)) /* Gre key */ #define NVGRE_V6_LEN (sizeof(struct rte_ether_hdr) + \ sizeof(struct rte_ipv6_hdr) + \ sizeof(struct rte_flow_item_gre) + \ - sizeof(rte_be32_t)) /* gre key */ + sizeof(rte_be32_t)) /* Gre key */ /* Process structure associated with a flow item */ struct nfp_flow_item_proc { - /* Bit-mask for fields supported by this PMD. 
*/ + /** Bit-mask for fields supported by this PMD. */ const void *mask_support; - /* Bit-mask to use when @p item->mask is not provided. */ + /** Bit-mask to use when @p item->mask is not provided. */ const void *mask_default; - /* Size in bytes for @p mask_support and @p mask_default. */ - const unsigned int mask_sz; - /* Merge a pattern item into a flow rule handle. */ + /** Size in bytes for @p mask_support and @p mask_default. */ + const size_t mask_sz; + /** Merge a pattern item into a flow rule handle. */ int (*merge)(struct nfp_app_fw_flower *app_fw_flower, struct rte_flow *nfp_flow, char **mbuf_off, @@ -72,7 +130,7 @@ struct nfp_flow_item_proc { const struct nfp_flow_item_proc *proc, bool is_mask, bool is_outer_layer); - /* List of possible subsequent items. */ + /** List of possible subsequent items. */ const enum rte_flow_item_type *const next_item; }; @@ -98,7 +156,8 @@ nfp_flow_dev_to_priv(struct rte_eth_dev *dev) } static int -nfp_mask_id_alloc(struct nfp_flow_priv *priv, uint8_t *mask_id) +nfp_mask_id_alloc(struct nfp_flow_priv *priv, + uint8_t *mask_id) { uint8_t temp_id; uint8_t freed_id; @@ -130,7 +189,8 @@ nfp_mask_id_alloc(struct nfp_flow_priv *priv, uint8_t *mask_id) } static int -nfp_mask_id_free(struct nfp_flow_priv *priv, uint8_t mask_id) +nfp_mask_id_free(struct nfp_flow_priv *priv, + uint8_t mask_id) { struct circ_buf *ring; @@ -248,18 +308,18 @@ nfp_check_mask_add(struct nfp_flow_priv *priv, mask_entry = nfp_mask_table_search(priv, mask_data, mask_len); if (mask_entry == NULL) { - /* mask entry does not exist, let's create one */ + /* Mask entry does not exist, let's create one */ ret = nfp_mask_table_add(priv, mask_data, mask_len, mask_id); if (ret != 0) return false; - - *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; } else { - /* mask entry already exist */ + /* Mask entry already exist */ mask_entry->ref_cnt++; *mask_id = mask_entry->mask_id; } + *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; + return true; } @@ -338,6 +398,48 @@ nfp_flow_table_search(struct nfp_flow_priv *priv, return flow_find; } +int +nfp_flow_table_add_merge(struct nfp_flow_priv *priv, + struct rte_flow *nfp_flow) +{ + struct rte_flow *flow_find; + + flow_find = nfp_flow_table_search(priv, nfp_flow); + if (flow_find != NULL) { + if (nfp_flow->merge_flag || flow_find->merge_flag) { + flow_find->merge_flag = true; + flow_find->ref_cnt++; + return 0; + } + + PMD_DRV_LOG(ERR, "Add to flow table failed."); + return -EINVAL; + } + + return nfp_flow_table_add(priv, nfp_flow); +} + +static int +nfp_flow_table_delete_merge(struct nfp_flow_priv *priv, + struct rte_flow *nfp_flow) +{ + struct rte_flow *flow_find; + + flow_find = nfp_flow_table_search(priv, nfp_flow); + if (flow_find == NULL) { + PMD_DRV_LOG(ERR, "Can't delete a non-existing flow."); + return -EINVAL; + } + + if (nfp_flow->merge_flag || flow_find->merge_flag) { + flow_find->ref_cnt--; + if (flow_find->ref_cnt > 0) + return 0; + } + + return nfp_flow_table_delete(priv, nfp_flow); +} + static struct rte_flow * nfp_flow_alloc(struct nfp_fl_key_ls *key_layer, uint32_t port_id) { @@ -372,7 +474,7 @@ nfp_flow_alloc(struct nfp_fl_key_ls *key_layer, uint32_t port_id) return NULL; } -static void +void nfp_flow_free(struct rte_flow *nfp_flow) { rte_free(nfp_flow->payload.meta); @@ -394,6 +496,7 @@ nfp_stats_id_alloc(struct nfp_flow_priv *priv, uint32_t *ctx) priv->stats_ids.init_unallocated--; priv->active_mem_unit = 0; } + return 0; } @@ -421,8 +524,8 @@ nfp_stats_id_free(struct nfp_flow_priv *priv, uint32_t ctx) /* Check if buffer is full */ 
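/*
 * Illustrative sketch (not part of the patch): the free-id ring pattern that
 * the CIRC_SPACE() check below guards. Freed stats/mask ids are pushed into a
 * fixed-size circular buffer and the free is refused with -ENOBUFS once no
 * space is left. All names and sizes here (id_ring, ID_RING_SIZE, id_ring_push)
 * are hypothetical stand-ins that only mirror the shape of the driver code;
 * the driver itself uses its own circ_buf/CIRC_SPACE helpers.
 */
#include <stdint.h>
#include <string.h>
#include <errno.h>

#define ID_RING_SIZE 64                 /* assumed power-of-two element count */
#define ID_ELEM_SIZE sizeof(uint32_t)   /* mirrors a 4-byte id element */

struct id_ring {
	uint32_t head;                  /* write index, in bytes */
	uint32_t tail;                  /* read index, in bytes */
	uint8_t buf[ID_RING_SIZE * ID_ELEM_SIZE];
};

/* Bytes still writable before head catches up with tail (CIRC_SPACE idea). */
static uint32_t
id_ring_space(const struct id_ring *r)
{
	return (r->tail - r->head - ID_ELEM_SIZE) & (sizeof(r->buf) - 1);
}

static int
id_ring_push(struct id_ring *r, uint32_t id)
{
	if (id_ring_space(r) == 0)      /* ring full: refuse the free */
		return -ENOBUFS;

	memcpy(&r->buf[r->head], &id, ID_ELEM_SIZE);
	r->head = (r->head + ID_ELEM_SIZE) & (sizeof(r->buf) - 1);
	return 0;
}
/*
 * The formula deliberately leaves one element-sized gap unused so that a full
 * ring (space == 0) can be told apart from an empty one (head == tail), which
 * is why the "buffer is full" test is an explicit comparison against zero.
 */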
ring = &priv->stats_ids.free_list; - if (!CIRC_SPACE(ring->head, ring->tail, priv->stats_ring_size * - NFP_FL_STATS_ELEM_RS - NFP_FL_STATS_ELEM_RS + 1)) + if (CIRC_SPACE(ring->head, ring->tail, priv->stats_ring_size * + NFP_FL_STATS_ELEM_RS - NFP_FL_STATS_ELEM_RS + 1) == 0) return -ENOBUFS; memcpy(&ring->buf[ring->head], &ctx, NFP_FL_STATS_ELEM_RS); @@ -507,7 +610,7 @@ nfp_tun_add_ipv6_off(struct nfp_app_fw_flower *app_fw_flower, rte_spinlock_lock(&priv->ipv6_off_lock); LIST_FOREACH(entry, &priv->ipv6_off_list, next) { - if (!memcmp(entry->ipv6_addr, ipv6, sizeof(entry->ipv6_addr))) { + if (memcmp(entry->ipv6_addr, ipv6, sizeof(entry->ipv6_addr)) == 0) { entry->ref_count++; rte_spinlock_unlock(&priv->ipv6_off_lock); return 0; @@ -520,6 +623,7 @@ nfp_tun_add_ipv6_off(struct nfp_app_fw_flower *app_fw_flower, PMD_DRV_LOG(ERR, "Mem error when offloading IP6 address."); return -ENOMEM; } + memcpy(tmp_entry->ipv6_addr, ipv6, sizeof(tmp_entry->ipv6_addr)); tmp_entry->ref_count = 1; @@ -541,7 +645,7 @@ nfp_tun_del_ipv6_off(struct nfp_app_fw_flower *app_fw_flower, rte_spinlock_lock(&priv->ipv6_off_lock); LIST_FOREACH(entry, &priv->ipv6_off_list, next) { - if (!memcmp(entry->ipv6_addr, ipv6, sizeof(entry->ipv6_addr))) { + if (memcmp(entry->ipv6_addr, ipv6, sizeof(entry->ipv6_addr)) == 0) { entry->ref_count--; if (entry->ref_count == 0) { LIST_REMOVE(entry, next); @@ -571,14 +675,14 @@ nfp_tun_check_ip_off_del(struct nfp_flower_representor *repr, struct nfp_flower_ext_meta *ext_meta = NULL; meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data; - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) != 0) ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1); if (ext_meta != NULL) key_layer2 = rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2); - if (key_layer2 & NFP_FLOWER_LAYER2_TUN_IPV6) { - if (key_layer2 & NFP_FLOWER_LAYER2_GRE) { + if ((key_layer2 & NFP_FLOWER_LAYER2_TUN_IPV6) != 0) { + if ((key_layer2 & NFP_FLOWER_LAYER2_GRE) != 0) { gre6 = (struct nfp_flower_ipv6_gre_tun *)(nfp_flow->payload.mask_data - sizeof(struct nfp_flower_ipv6_gre_tun)); ret = nfp_tun_del_ipv6_off(repr->app_fw_flower, gre6->ipv6.ipv6_dst); @@ -588,7 +692,7 @@ nfp_tun_check_ip_off_del(struct nfp_flower_representor *repr, ret = nfp_tun_del_ipv6_off(repr->app_fw_flower, udp6->ipv6.ipv6_dst); } } else { - if (key_layer2 & NFP_FLOWER_LAYER2_GRE) { + if ((key_layer2 & NFP_FLOWER_LAYER2_GRE) != 0) { gre4 = (struct nfp_flower_ipv4_gre_tun *)(nfp_flow->payload.mask_data - sizeof(struct nfp_flower_ipv4_gre_tun)); ret = nfp_tun_del_ipv4_off(repr->app_fw_flower, gre4->ipv4.dst); @@ -603,7 +707,8 @@ nfp_tun_check_ip_off_del(struct nfp_flower_representor *repr, } static void -nfp_flower_compile_meta_tci(char *mbuf_off, struct nfp_fl_key_ls *key_layer) +nfp_flower_compile_meta_tci(char *mbuf_off, + struct nfp_fl_key_ls *key_layer) { struct nfp_flower_meta_tci *tci_meta; @@ -614,7 +719,8 @@ nfp_flower_compile_meta_tci(char *mbuf_off, struct nfp_fl_key_ls *key_layer) } static void -nfp_flower_update_meta_tci(char *exact, uint8_t mask_id) +nfp_flower_update_meta_tci(char *exact, + uint8_t mask_id) { struct nfp_flower_meta_tci *meta_tci; @@ -623,7 +729,8 @@ nfp_flower_update_meta_tci(char *exact, uint8_t mask_id) } static void -nfp_flower_compile_ext_meta(char *mbuf_off, struct nfp_fl_key_ls *key_layer) +nfp_flower_compile_ext_meta(char *mbuf_off, + struct nfp_fl_key_ls *key_layer) { struct nfp_flower_ext_meta *ext_meta; @@ -653,11 +760,12 @@ static 
void nfp_flow_compile_metadata(struct nfp_flow_priv *priv, struct rte_flow *nfp_flow, struct nfp_fl_key_ls *key_layer, - uint32_t stats_ctx) + uint32_t stats_ctx, + uint64_t cookie) { - struct nfp_fl_rule_metadata *nfp_flow_meta; - char *mbuf_off_exact; char *mbuf_off_mask; + char *mbuf_off_exact; + struct nfp_fl_rule_metadata *nfp_flow_meta; /* * Convert to long words as firmware expects @@ -669,7 +777,7 @@ nfp_flow_compile_metadata(struct nfp_flow_priv *priv, nfp_flow_meta->act_len = key_layer->act_size >> NFP_FL_LW_SIZ; nfp_flow_meta->flags = 0; nfp_flow_meta->host_ctx_id = rte_cpu_to_be_32(stats_ctx); - nfp_flow_meta->host_cookie = rte_rand(); + nfp_flow_meta->host_cookie = rte_cpu_to_be_64(cookie); nfp_flow_meta->flow_version = rte_cpu_to_be_64(priv->flower_version); mbuf_off_exact = nfp_flow->payload.unmasked_data; @@ -682,7 +790,7 @@ nfp_flow_compile_metadata(struct nfp_flow_priv *priv, mbuf_off_mask += sizeof(struct nfp_flower_meta_tci); /* Populate Extended Metadata if required */ - if (key_layer->key_layer & NFP_FLOWER_LAYER_EXT_META) { + if ((key_layer->key_layer & NFP_FLOWER_LAYER_EXT_META) != 0) { nfp_flower_compile_ext_meta(mbuf_off_exact, key_layer); nfp_flower_compile_ext_meta(mbuf_off_mask, key_layer); mbuf_off_exact += sizeof(struct nfp_flower_ext_meta); @@ -712,7 +820,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[], case RTE_FLOW_ITEM_TYPE_ETH: PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_ETH detected"); /* - * eth is set with no specific params. + * Eth is set with no specific params. * NFP does not need this. */ if (item->spec == NULL) @@ -773,7 +881,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[], key_ls->key_size += sizeof(struct nfp_flower_ipv4_udp_tun); /* * The outer l3 layer information is - * in `struct nfp_flower_ipv4_udp_tun` + * in `struct nfp_flower_ipv4_udp_tun`. */ key_ls->key_size -= sizeof(struct nfp_flower_ipv4); } else if (outer_ip6_flag) { @@ -783,7 +891,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[], key_ls->key_size += sizeof(struct nfp_flower_ipv6_udp_tun); /* * The outer l3 layer information is - * in `struct nfp_flower_ipv6_udp_tun` + * in `struct nfp_flower_ipv6_udp_tun`. */ key_ls->key_size -= sizeof(struct nfp_flower_ipv6); } else { @@ -804,7 +912,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[], key_ls->key_size += sizeof(struct nfp_flower_ipv4_udp_tun); /* * The outer l3 layer information is - * in `struct nfp_flower_ipv4_udp_tun` + * in `struct nfp_flower_ipv4_udp_tun`. */ key_ls->key_size -= sizeof(struct nfp_flower_ipv4); } else if (outer_ip6_flag) { @@ -812,7 +920,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[], key_ls->key_size += sizeof(struct nfp_flower_ipv6_udp_tun); /* * The outer l3 layer information is - * in `struct nfp_flower_ipv6_udp_tun` + * in `struct nfp_flower_ipv6_udp_tun`. */ key_ls->key_size -= sizeof(struct nfp_flower_ipv6); } else { @@ -833,7 +941,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[], key_ls->key_size += sizeof(struct nfp_flower_ipv4_gre_tun); /* * The outer l3 layer information is - * in `struct nfp_flower_ipv4_gre_tun` + * in `struct nfp_flower_ipv4_gre_tun`. 
*/ key_ls->key_size -= sizeof(struct nfp_flower_ipv4); } else if (outer_ip6_flag) { @@ -841,7 +949,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[], key_ls->key_size += sizeof(struct nfp_flower_ipv6_gre_tun); /* * The outer l3 layer information is - * in `struct nfp_flower_ipv6_gre_tun` + * in `struct nfp_flower_ipv6_gre_tun`. */ key_ls->key_size -= sizeof(struct nfp_flower_ipv6); } else { @@ -868,9 +976,9 @@ nfp_flow_key_layers_calculate_actions(const struct rte_flow_action actions[], int ret = 0; bool meter_flag = false; bool tc_hl_flag = false; - bool mac_set_flag = false; bool ip_set_flag = false; bool tp_set_flag = false; + bool mac_set_flag = false; bool ttl_tos_flag = false; const struct rte_flow_action *action; @@ -967,7 +1075,7 @@ nfp_flow_key_layers_calculate_actions(const struct rte_flow_action actions[], break; case RTE_FLOW_ACTION_TYPE_SET_TTL: PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_SET_TTL detected"); - if (key_ls->key_layer & NFP_FLOWER_LAYER_IPV4) { + if ((key_ls->key_layer & NFP_FLOWER_LAYER_IPV4) != 0) { if (!ttl_tos_flag) { key_ls->act_size += sizeof(struct nfp_fl_act_set_ip4_ttl_tos); @@ -1023,6 +1131,9 @@ nfp_flow_key_layers_calculate_actions(const struct rte_flow_action actions[], return -ENOTSUP; } break; + case RTE_FLOW_ACTION_TYPE_CONNTRACK: + PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_CONNTRACK detected"); + break; default: PMD_DRV_LOG(ERR, "Action type %d not supported.", action->type); return -ENOTSUP; @@ -1062,15 +1173,15 @@ nfp_flow_is_tunnel(struct rte_flow *nfp_flow) struct nfp_flower_meta_tci *meta_tci; meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data; - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_VXLAN) + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_VXLAN) != 0) return true; - if (!(meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META)) + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) == 0) return false; ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1); key_layer2 = rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2); - if (key_layer2 & (NFP_FLOWER_LAYER2_GENEVE | NFP_FLOWER_LAYER2_GRE)) + if ((key_layer2 & (NFP_FLOWER_LAYER2_GENEVE | NFP_FLOWER_LAYER2_GRE)) != 0) return true; return false; @@ -1166,7 +1277,7 @@ nfp_flow_merge_ipv4(__rte_unused struct nfp_app_fw_flower *app_fw_flower, spec = item->spec; mask = item->mask ? item->mask : proc->mask_default; meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data; - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) != 0) ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1); if (is_outer_layer && nfp_flow_is_tunnel(nfp_flow)) { @@ -1177,8 +1288,8 @@ nfp_flow_merge_ipv4(__rte_unused struct nfp_app_fw_flower *app_fw_flower, hdr = is_mask ? &mask->hdr : &spec->hdr; - if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & - NFP_FLOWER_LAYER2_GRE)) { + if (ext_meta != NULL && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & + NFP_FLOWER_LAYER2_GRE) != 0) { ipv4_gre_tun = (struct nfp_flower_ipv4_gre_tun *)*mbuf_off; ipv4_gre_tun->ip_ext.tos = hdr->type_of_service; @@ -1200,10 +1311,10 @@ nfp_flow_merge_ipv4(__rte_unused struct nfp_app_fw_flower *app_fw_flower, } /* - * reserve space for L4 info. - * rte_flow has ipv4 before L4 but NFP flower fw requires L4 before ipv4 + * Reserve space for L4 info. + * rte_flow has ipv4 before L4 but NFP flower fw requires L4 before ipv4. 
*/ - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) != 0) *mbuf_off += sizeof(struct nfp_flower_tp_ports); hdr = is_mask ? &mask->hdr : &spec->hdr; @@ -1244,7 +1355,7 @@ nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower *app_fw_flower, spec = item->spec; mask = item->mask ? item->mask : proc->mask_default; meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data; - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) != 0) ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1); if (is_outer_layer && nfp_flow_is_tunnel(nfp_flow)) { @@ -1256,8 +1367,8 @@ nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower *app_fw_flower, hdr = is_mask ? &mask->hdr : &spec->hdr; vtc_flow = rte_be_to_cpu_32(hdr->vtc_flow); - if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & - NFP_FLOWER_LAYER2_GRE)) { + if (ext_meta != NULL && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & + NFP_FLOWER_LAYER2_GRE) != 0) { ipv6_gre_tun = (struct nfp_flower_ipv6_gre_tun *)*mbuf_off; ipv6_gre_tun->ip_ext.tos = vtc_flow >> RTE_IPV6_HDR_TC_SHIFT; @@ -1283,10 +1394,10 @@ nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower *app_fw_flower, } /* - * reserve space for L4 info. - * rte_flow has ipv4 before L4 but NFP flower fw requires L4 before ipv6 + * Reserve space for L4 info. + * rte_flow has ipv6 before L4 but NFP flower fw requires L4 before ipv6. */ - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) != 0) *mbuf_off += sizeof(struct nfp_flower_tp_ports); hdr = is_mask ? &mask->hdr : &spec->hdr; @@ -1330,16 +1441,16 @@ nfp_flow_merge_tcp(__rte_unused struct nfp_app_fw_flower *app_fw_flower, } meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data; - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) { + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) != 0) { ipv4 = (struct nfp_flower_ipv4 *) - (*mbuf_off - sizeof(struct nfp_flower_ipv4)); + (*mbuf_off - sizeof(struct nfp_flower_ipv4)); ports = (struct nfp_flower_tp_ports *) - ((char *)ipv4 - sizeof(struct nfp_flower_tp_ports)); + ((char *)ipv4 - sizeof(struct nfp_flower_tp_ports)); } else { /* IPv6 */ ipv6 = (struct nfp_flower_ipv6 *) - (*mbuf_off - sizeof(struct nfp_flower_ipv6)); + (*mbuf_off - sizeof(struct nfp_flower_ipv6)); ports = (struct nfp_flower_tp_ports *) - ((char *)ipv6 - sizeof(struct nfp_flower_tp_ports)); + ((char *)ipv6 - sizeof(struct nfp_flower_tp_ports)); } mask = item->mask ? 
item->mask : proc->mask_default; @@ -1353,7 +1464,7 @@ nfp_flow_merge_tcp(__rte_unused struct nfp_app_fw_flower *app_fw_flower, tcp_flags = spec->hdr.tcp_flags; } - if (ipv4) { + if (ipv4 != NULL) { if (tcp_flags & RTE_TCP_FIN_FLAG) ipv4->ip_ext.flags |= NFP_FL_TCP_FLAG_FIN; if (tcp_flags & RTE_TCP_SYN_FLAG) @@ -1408,12 +1519,12 @@ nfp_flow_merge_udp(__rte_unused struct nfp_app_fw_flower *app_fw_flower, } meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data; - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) { + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) != 0) { ports_off = *mbuf_off - sizeof(struct nfp_flower_ipv4) - - sizeof(struct nfp_flower_tp_ports); + sizeof(struct nfp_flower_tp_ports); } else {/* IPv6 */ ports_off = *mbuf_off - sizeof(struct nfp_flower_ipv6) - - sizeof(struct nfp_flower_tp_ports); + sizeof(struct nfp_flower_tp_ports); } ports = (struct nfp_flower_tp_ports *)ports_off; @@ -1451,12 +1562,12 @@ nfp_flow_merge_sctp(__rte_unused struct nfp_app_fw_flower *app_fw_flower, } meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data; - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) { + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) != 0) { ports_off = *mbuf_off - sizeof(struct nfp_flower_ipv4) - - sizeof(struct nfp_flower_tp_ports); + sizeof(struct nfp_flower_tp_ports); } else { /* IPv6 */ ports_off = *mbuf_off - sizeof(struct nfp_flower_ipv6) - - sizeof(struct nfp_flower_tp_ports); + sizeof(struct nfp_flower_tp_ports); } ports = (struct nfp_flower_tp_ports *)ports_off; @@ -1491,7 +1602,7 @@ nfp_flow_merge_vxlan(struct nfp_app_fw_flower *app_fw_flower, struct nfp_flower_ext_meta *ext_meta = NULL; meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data; - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) != 0) ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1); spec = item->spec; @@ -1503,8 +1614,8 @@ nfp_flow_merge_vxlan(struct nfp_app_fw_flower *app_fw_flower, mask = item->mask ? item->mask : proc->mask_default; hdr = is_mask ? &mask->hdr : &spec->hdr; - if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & - NFP_FLOWER_LAYER2_TUN_IPV6)) { + if (ext_meta != NULL && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & + NFP_FLOWER_LAYER2_TUN_IPV6) != 0) { tun6 = (struct nfp_flower_ipv6_udp_tun *)*mbuf_off; tun6->tun_id = hdr->vx_vni; if (!is_mask) @@ -1517,8 +1628,8 @@ nfp_flow_merge_vxlan(struct nfp_app_fw_flower *app_fw_flower, } vxlan_end: - if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & - NFP_FLOWER_LAYER2_TUN_IPV6)) + if (ext_meta != NULL && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & + NFP_FLOWER_LAYER2_TUN_IPV6) != 0) *mbuf_off += sizeof(struct nfp_flower_ipv6_udp_tun); else *mbuf_off += sizeof(struct nfp_flower_ipv4_udp_tun); @@ -1545,7 +1656,7 @@ nfp_flow_merge_geneve(struct nfp_app_fw_flower *app_fw_flower, struct nfp_flower_ext_meta *ext_meta = NULL; meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data; - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) != 0) ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1); spec = item->spec; @@ -1557,8 +1668,8 @@ nfp_flow_merge_geneve(struct nfp_app_fw_flower *app_fw_flower, mask = item->mask ? item->mask : proc->mask_default; geneve = is_mask ? 
mask : spec; - if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & - NFP_FLOWER_LAYER2_TUN_IPV6)) { + if (ext_meta != NULL && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & + NFP_FLOWER_LAYER2_TUN_IPV6) != 0) { tun6 = (struct nfp_flower_ipv6_udp_tun *)*mbuf_off; tun6->tun_id = rte_cpu_to_be_32((geneve->vni[0] << 16) | (geneve->vni[1] << 8) | (geneve->vni[2])); @@ -1573,8 +1684,8 @@ nfp_flow_merge_geneve(struct nfp_app_fw_flower *app_fw_flower, } geneve_end: - if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & - NFP_FLOWER_LAYER2_TUN_IPV6)) { + if (ext_meta != NULL && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & + NFP_FLOWER_LAYER2_TUN_IPV6) != 0) { *mbuf_off += sizeof(struct nfp_flower_ipv6_udp_tun); } else { *mbuf_off += sizeof(struct nfp_flower_ipv4_udp_tun); @@ -1601,8 +1712,8 @@ nfp_flow_merge_gre(__rte_unused struct nfp_app_fw_flower *app_fw_flower, ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1); /* NVGRE is the only supported GRE tunnel type */ - if (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & - NFP_FLOWER_LAYER2_TUN_IPV6) { + if ((rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & + NFP_FLOWER_LAYER2_TUN_IPV6) != 0) { tun6 = (struct nfp_flower_ipv6_gre_tun *)*mbuf_off; if (is_mask) tun6->ethertype = rte_cpu_to_be_16(~0); @@ -1649,8 +1760,8 @@ nfp_flow_merge_gre_key(struct nfp_app_fw_flower *app_fw_flower, mask = item->mask ? item->mask : proc->mask_default; tun_key = is_mask ? *mask : *spec; - if (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & - NFP_FLOWER_LAYER2_TUN_IPV6) { + if ((rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & + NFP_FLOWER_LAYER2_TUN_IPV6) != 0) { tun6 = (struct nfp_flower_ipv6_gre_tun *)*mbuf_off; tun6->tun_key = tun_key; tun6->tun_flags = rte_cpu_to_be_16(NFP_FL_GRE_FLAG_KEY); @@ -1665,8 +1776,8 @@ nfp_flow_merge_gre_key(struct nfp_app_fw_flower *app_fw_flower, } gre_key_end: - if (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & - NFP_FLOWER_LAYER2_TUN_IPV6) + if ((rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) & + NFP_FLOWER_LAYER2_TUN_IPV6) != 0) *mbuf_off += sizeof(struct nfp_flower_ipv6_gre_tun); else *mbuf_off += sizeof(struct nfp_flower_ipv4_gre_tun); @@ -1687,7 +1798,7 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = { .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4, RTE_FLOW_ITEM_TYPE_IPV6), - .mask_support = &(const struct rte_flow_item_eth){ + .mask_support = &(const struct rte_flow_item_eth) { .hdr = { .dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff", .src_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff", @@ -1702,7 +1813,7 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = { [RTE_FLOW_ITEM_TYPE_VLAN] = { .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4, RTE_FLOW_ITEM_TYPE_IPV6), - .mask_support = &(const struct rte_flow_item_vlan){ + .mask_support = &(const struct rte_flow_item_vlan) { .hdr = { .vlan_tci = RTE_BE16(0xefff), .eth_proto = RTE_BE16(0xffff), @@ -1718,7 +1829,7 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = { RTE_FLOW_ITEM_TYPE_UDP, RTE_FLOW_ITEM_TYPE_SCTP, RTE_FLOW_ITEM_TYPE_GRE), - .mask_support = &(const struct rte_flow_item_ipv4){ + .mask_support = &(const struct rte_flow_item_ipv4) { .hdr = { .type_of_service = 0xff, .fragment_offset = RTE_BE16(0xffff), @@ -1737,7 +1848,7 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = { RTE_FLOW_ITEM_TYPE_UDP, RTE_FLOW_ITEM_TYPE_SCTP, RTE_FLOW_ITEM_TYPE_GRE), - .mask_support = &(const struct rte_flow_item_ipv6){ + .mask_support = 
&(const struct rte_flow_item_ipv6) { .hdr = { .vtc_flow = RTE_BE32(0x0ff00000), .proto = 0xff, @@ -1754,7 +1865,7 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = { .merge = nfp_flow_merge_ipv6, }, [RTE_FLOW_ITEM_TYPE_TCP] = { - .mask_support = &(const struct rte_flow_item_tcp){ + .mask_support = &(const struct rte_flow_item_tcp) { .hdr = { .tcp_flags = 0xff, .src_port = RTE_BE16(0xffff), @@ -1768,7 +1879,7 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = { [RTE_FLOW_ITEM_TYPE_UDP] = { .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VXLAN, RTE_FLOW_ITEM_TYPE_GENEVE), - .mask_support = &(const struct rte_flow_item_udp){ + .mask_support = &(const struct rte_flow_item_udp) { .hdr = { .src_port = RTE_BE16(0xffff), .dst_port = RTE_BE16(0xffff), @@ -1779,7 +1890,7 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = { .merge = nfp_flow_merge_udp, }, [RTE_FLOW_ITEM_TYPE_SCTP] = { - .mask_support = &(const struct rte_flow_item_sctp){ + .mask_support = &(const struct rte_flow_item_sctp) { .hdr = { .src_port = RTE_BE16(0xffff), .dst_port = RTE_BE16(0xffff), @@ -1791,7 +1902,7 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = { }, [RTE_FLOW_ITEM_TYPE_VXLAN] = { .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH), - .mask_support = &(const struct rte_flow_item_vxlan){ + .mask_support = &(const struct rte_flow_item_vxlan) { .hdr = { .vx_vni = RTE_BE32(0xffffff00), }, @@ -1802,7 +1913,7 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = { }, [RTE_FLOW_ITEM_TYPE_GENEVE] = { .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH), - .mask_support = &(const struct rte_flow_item_geneve){ + .mask_support = &(const struct rte_flow_item_geneve) { .vni = "\xff\xff\xff", }, .mask_default = &rte_flow_item_geneve_mask, @@ -1811,7 +1922,7 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = { }, [RTE_FLOW_ITEM_TYPE_GRE] = { .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_GRE_KEY), - .mask_support = &(const struct rte_flow_item_gre){ + .mask_support = &(const struct rte_flow_item_gre) { .c_rsvd0_ver = RTE_BE16(0xa000), .protocol = RTE_BE16(0xffff), }, @@ -1832,8 +1943,8 @@ static int nfp_flow_item_check(const struct rte_flow_item *item, const struct nfp_flow_item_proc *proc) { + size_t i; int ret = 0; - unsigned int i; const uint8_t *mask; /* item->last and item->mask cannot exist without item->spec. */ @@ -1843,13 +1954,13 @@ nfp_flow_item_check(const struct rte_flow_item *item, " without a corresponding 'spec'."); return -EINVAL; } + /* No spec, no mask, no problem. */ return 0; } - mask = item->mask ? - (const uint8_t *)item->mask : - (const uint8_t *)proc->mask_default; + mask = item->mask ? 
(const uint8_t *)item->mask : + (const uint8_t *)proc->mask_default; /* * Single-pass check to make sure that: @@ -1890,7 +2001,7 @@ nfp_flow_is_tun_item(const struct rte_flow_item *item) return false; } -static bool +bool nfp_flow_inner_item_get(const struct rte_flow_item items[], const struct rte_flow_item **inner_item) { @@ -1929,7 +2040,7 @@ nfp_flow_compile_item_proc(struct nfp_flower_representor *repr, char **mbuf_off_mask, bool is_outer_layer) { - int i; + uint32_t i; int ret = 0; bool continue_flag = true; const struct rte_flow_item *item; @@ -2011,7 +2122,7 @@ nfp_flow_compile_items(struct nfp_flower_representor *representor, sizeof(struct nfp_flower_in_port); meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data; - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) { + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) != 0) { mbuf_off_exact += sizeof(struct nfp_flower_ext_meta); mbuf_off_mask += sizeof(struct nfp_flower_ext_meta); } @@ -2019,7 +2130,7 @@ nfp_flow_compile_items(struct nfp_flower_representor *representor, if (nfp_flow_tcp_flag_check(items)) nfp_flow->tcp_flag = true; - /* Check if this is a tunnel flow and get the inner item*/ + /* Check if this is a tunnel flow and get the inner item */ is_tun_flow = nfp_flow_inner_item_get(items, &loop_item); if (is_tun_flow) is_outer_layer = false; @@ -2163,7 +2274,7 @@ nfp_flow_action_set_ipv6(char *act_data, const struct rte_flow_action *action, bool ip_src_flag) { - int i; + uint32_t i; rte_be32_t tmp; size_t act_size; struct nfp_fl_act_set_ipv6_addr *set_ip; @@ -2454,7 +2565,7 @@ nfp_flower_add_tun_neigh_v4_decap(struct nfp_app_fw_flower *app_fw_flower, port = (struct nfp_flower_in_port *)(meta_tci + 1); eth = (struct nfp_flower_mac_mpls *)(port + 1); - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) != 0) ipv4 = (struct nfp_flower_ipv4 *)((char *)eth + sizeof(struct nfp_flower_mac_mpls) + sizeof(struct nfp_flower_tp_ports)); @@ -2581,7 +2692,7 @@ nfp_flower_add_tun_neigh_v6_decap(struct nfp_app_fw_flower *app_fw_flower, port = (struct nfp_flower_in_port *)(meta_tci + 1); eth = (struct nfp_flower_mac_mpls *)(port + 1); - if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) != 0) ipv6 = (struct nfp_flower_ipv6 *)((char *)eth + sizeof(struct nfp_flower_mac_mpls) + sizeof(struct nfp_flower_tp_ports)); @@ -2923,6 +3034,7 @@ nfp_pre_tun_table_check_add(struct nfp_flower_representor *repr, for (i = 1; i < NFP_TUN_PRE_TUN_RULE_LIMIT; i++) { if (priv->pre_tun_bitmap[i] == 0) continue; + entry->mac_index = i; find_entry = nfp_pre_tun_table_search(priv, (char *)entry, entry_size); if (find_entry != NULL) { @@ -2949,6 +3061,7 @@ nfp_pre_tun_table_check_add(struct nfp_flower_representor *repr, *index = entry->mac_index; priv->pre_tun_cnt++; + return 0; } @@ -2983,12 +3096,14 @@ nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr, for (i = 1; i < NFP_TUN_PRE_TUN_RULE_LIMIT; i++) { if (priv->pre_tun_bitmap[i] == 0) continue; + entry->mac_index = i; find_entry = nfp_pre_tun_table_search(priv, (char *)entry, entry_size); if (find_entry != NULL) { find_entry->ref_cnt--; if (find_entry->ref_cnt != 0) goto free_entry; + priv->pre_tun_bitmap[i] = 0; break; } @@ -3077,7 +3192,7 @@ nfp_flow_action_tunnel_decap(struct nfp_flower_representor *repr, } meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data; - if (meta_tci->nfp_flow_key_layer & 
NFP_FLOWER_LAYER_IPV4) + if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) != 0) return nfp_flower_add_tun_neigh_v4_decap(app_fw_flower, nfp_flow_meta, nfp_flow); else return nfp_flower_add_tun_neigh_v6_decap(app_fw_flower, nfp_flow_meta, nfp_flow); @@ -3093,11 +3208,11 @@ nfp_flow_action_geneve_encap_v4(struct nfp_app_fw_flower *app_fw_flower, { uint64_t tun_id; const struct rte_ether_hdr *eth; + struct nfp_fl_act_pre_tun *pre_tun; + struct nfp_fl_act_set_tun *set_tun; const struct rte_flow_item_udp *udp; const struct rte_flow_item_ipv4 *ipv4; const struct rte_flow_item_geneve *geneve; - struct nfp_fl_act_pre_tun *pre_tun; - struct nfp_fl_act_set_tun *set_tun; size_t act_pre_size = sizeof(struct nfp_fl_act_pre_tun); size_t act_set_size = sizeof(struct nfp_fl_act_set_tun); @@ -3133,11 +3248,11 @@ nfp_flow_action_geneve_encap_v6(struct nfp_app_fw_flower *app_fw_flower, uint8_t tos; uint64_t tun_id; const struct rte_ether_hdr *eth; + struct nfp_fl_act_pre_tun *pre_tun; + struct nfp_fl_act_set_tun *set_tun; const struct rte_flow_item_udp *udp; const struct rte_flow_item_ipv6 *ipv6; const struct rte_flow_item_geneve *geneve; - struct nfp_fl_act_pre_tun *pre_tun; - struct nfp_fl_act_set_tun *set_tun; size_t act_pre_size = sizeof(struct nfp_fl_act_pre_tun); size_t act_set_size = sizeof(struct nfp_fl_act_set_tun); @@ -3173,10 +3288,10 @@ nfp_flow_action_nvgre_encap_v4(struct nfp_app_fw_flower *app_fw_flower, { uint64_t tun_id; const struct rte_ether_hdr *eth; - const struct rte_flow_item_ipv4 *ipv4; - const struct rte_flow_item_gre *gre; struct nfp_fl_act_pre_tun *pre_tun; struct nfp_fl_act_set_tun *set_tun; + const struct rte_flow_item_gre *gre; + const struct rte_flow_item_ipv4 *ipv4; size_t act_pre_size = sizeof(struct nfp_fl_act_pre_tun); size_t act_set_size = sizeof(struct nfp_fl_act_set_tun); @@ -3211,10 +3326,10 @@ nfp_flow_action_nvgre_encap_v6(struct nfp_app_fw_flower *app_fw_flower, uint8_t tos; uint64_t tun_id; const struct rte_ether_hdr *eth; - const struct rte_flow_item_ipv6 *ipv6; - const struct rte_flow_item_gre *gre; struct nfp_fl_act_pre_tun *pre_tun; struct nfp_fl_act_set_tun *set_tun; + const struct rte_flow_item_gre *gre; + const struct rte_flow_item_ipv6 *ipv6; size_t act_pre_size = sizeof(struct nfp_fl_act_pre_tun); size_t act_set_size = sizeof(struct nfp_fl_act_set_tun); @@ -3258,9 +3373,9 @@ nfp_flow_action_raw_encap(struct nfp_app_fw_flower *app_fw_flower, return -EINVAL; } - /* Pre_tunnel action must be the first on action list. - * If other actions already exist, they need to be - * pushed forward. + /* + * Pre_tunnel action must be the first on action list. + * If other actions already exist, they need to be pushed forward. 
*/ act_len = act_data - actions; if (act_len != 0) { @@ -3359,12 +3474,12 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, uint32_t count; char *position; char *action_data; - bool ttl_tos_flag = false; - bool tc_hl_flag = false; bool drop_flag = false; + bool tc_hl_flag = false; bool ip_set_flag = false; bool tp_set_flag = false; bool mac_set_flag = false; + bool ttl_tos_flag = false; uint32_t total_actions = 0; const struct rte_flow_action *action; struct nfp_flower_meta_tci *meta_tci; @@ -3567,6 +3682,9 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, return -EINVAL; position += sizeof(struct nfp_fl_act_meter); break; + case RTE_FLOW_ACTION_TYPE_CONNTRACK: + PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_CONNTRACK"); + break; default: PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type); return -ENOTSUP; @@ -3582,11 +3700,14 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, return 0; } -static struct rte_flow * +struct rte_flow * nfp_flow_process(struct nfp_flower_representor *representor, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool validate_flag) + bool validate_flag, + uint64_t cookie, + bool install_flag, + bool merge_flag) { int ret; char *hash_data; @@ -3622,9 +3743,10 @@ nfp_flow_process(struct nfp_flower_representor *representor, goto free_stats; } - nfp_flow->install_flag = true; + nfp_flow->install_flag = install_flag; + nfp_flow->merge_flag = merge_flag; - nfp_flow_compile_metadata(priv, nfp_flow, &key_layer, stats_ctx); + nfp_flow_compile_metadata(priv, nfp_flow, &key_layer, stats_ctx, cookie); ret = nfp_flow_compile_items(representor, items, nfp_flow); if (ret != 0) { @@ -3656,7 +3778,7 @@ nfp_flow_process(struct nfp_flower_representor *representor, /* Find the flow in hash table */ flow_find = nfp_flow_table_search(priv, nfp_flow); - if (flow_find != NULL) { + if (flow_find != NULL && !nfp_flow->merge_flag && !flow_find->merge_flag) { PMD_DRV_LOG(ERR, "This flow is already exist."); if (!nfp_check_mask_remove(priv, mask_data, mask_len, &nfp_flow_meta->flags)) { @@ -3687,6 +3809,10 @@ nfp_flow_setup(struct nfp_flower_representor *representor, __rte_unused struct rte_flow_error *error, bool validate_flag) { + uint64_t cookie; + const struct rte_flow_item *item; + const struct rte_flow_item *ct_item = NULL; + if (attr->group != 0) PMD_DRV_LOG(INFO, "Pretend we support group attribute."); @@ -3696,10 +3822,23 @@ nfp_flow_setup(struct nfp_flower_representor *representor, if (attr->transfer != 0) PMD_DRV_LOG(INFO, "Pretend we support transfer attribute."); - return nfp_flow_process(representor, items, actions, validate_flag); + for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) { + if (item->type == RTE_FLOW_ITEM_TYPE_CONNTRACK) { + ct_item = item; + break; + } + } + + cookie = rte_rand(); + + if (ct_item != NULL) + return nfp_ct_flow_setup(representor, items, actions, + ct_item, validate_flag, cookie); + + return nfp_flow_process(representor, items, actions, validate_flag, cookie, true, false); } -static int +int nfp_flow_teardown(struct nfp_flow_priv *priv, struct rte_flow *nfp_flow, bool validate_flag) @@ -3799,7 +3938,7 @@ nfp_flow_create(struct rte_eth_dev *dev, } /* Add the flow to flow hash table */ - ret = nfp_flow_table_add(priv, nfp_flow); + ret = nfp_flow_table_add_merge(priv, nfp_flow); if (ret != 0) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Add flow to the flow table failed."); @@ -3827,14 +3966,16 @@ 
nfp_flow_create(struct rte_eth_dev *dev, return NULL; } -static int +int nfp_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *nfp_flow, struct rte_flow_error *error) { int ret; + uint64_t cookie; struct rte_flow *flow_find; struct nfp_flow_priv *priv; + struct nfp_ct_map_entry *me; struct nfp_app_fw_flower *app_fw_flower; struct nfp_flower_representor *representor; @@ -3842,6 +3983,12 @@ nfp_flow_destroy(struct rte_eth_dev *dev, app_fw_flower = representor->app_fw_flower; priv = app_fw_flower->flow_priv; + /* Find the flow in ct_map_table */ + cookie = rte_be_to_cpu_64(nfp_flow->payload.meta->host_cookie); + me = nfp_ct_map_table_search(priv, (char *)&cookie, sizeof(uint64_t)); + if (me != NULL) + return nfp_ct_offload_del(dev, me, error); + /* Find the flow in flow hash table */ flow_find = nfp_flow_table_search(priv, nfp_flow); if (flow_find == NULL) { @@ -3902,7 +4049,7 @@ nfp_flow_destroy(struct rte_eth_dev *dev, } /* Delete the flow from flow hash table */ - ret = nfp_flow_table_delete(priv, nfp_flow); + ret = nfp_flow_table_delete_merge(priv, nfp_flow); if (ret != 0) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Delete flow from the flow table failed."); @@ -3961,10 +4108,12 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, void *data) { bool reset; + uint64_t cookie; uint32_t ctx_id; struct rte_flow *flow; struct nfp_flow_priv *priv; struct nfp_fl_stats *stats; + struct nfp_ct_map_entry *me; struct rte_flow_query_count *query; priv = nfp_flow_dev_to_priv(dev); @@ -3978,8 +4127,15 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, reset = query->reset; memset(query, 0, sizeof(*query)); - ctx_id = rte_be_to_cpu_32(nfp_flow->payload.meta->host_ctx_id); - stats = &priv->stats[ctx_id]; + /* Find the flow in ct_map_table */ + cookie = rte_be_to_cpu_64(nfp_flow->payload.meta->host_cookie); + me = nfp_ct_map_table_search(priv, (char *)&cookie, sizeof(uint64_t)); + if (me != NULL) { + stats = nfp_ct_flow_stats_get(priv, me); + } else { + ctx_id = rte_be_to_cpu_32(nfp_flow->payload.meta->host_ctx_id); + stats = &priv->stats[ctx_id]; + } rte_spinlock_lock(&priv->stats_lock); if (stats->pkts != 0 && stats->bytes != 0) { @@ -4134,10 +4290,10 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) size_t stats_size; uint64_t ctx_count; uint64_t ctx_split; + struct nfp_flow_priv *priv; char mask_name[RTE_HASH_NAMESIZE]; char flow_name[RTE_HASH_NAMESIZE]; char pretun_name[RTE_HASH_NAMESIZE]; - struct nfp_flow_priv *priv; struct nfp_app_fw_flower *app_fw_flower; const char *pci_name = strchr(pf_dev->pci_dev->name, ':') + 1; @@ -4171,6 +4327,23 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY, }; + struct rte_hash_parameters ct_zone_hash_params = { + .name = "ct_zone_table", + .entries = 65536, + .hash_func = rte_jhash, + .socket_id = rte_socket_id(), + .key_len = sizeof(uint32_t), + .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY, + }; + + struct rte_hash_parameters ct_map_hash_params = { + .name = "ct_map_table", + .hash_func = rte_jhash, + .socket_id = rte_socket_id(), + .key_len = sizeof(uint32_t), + .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY, + }; + ctx_count = nfp_rtsym_read_le(pf_dev->sym_tbl, "CONFIG_FC_HOST_CTX_COUNT", &ret); if (ret < 0) { @@ -4218,7 +4391,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) goto free_mask_id; } - /* flow stats */ + /* Flow stats */ rte_spinlock_init(&priv->stats_lock); stats_size = (ctx_count & NFP_FL_STAT_ID_STAT) | ((ctx_split - 1) & NFP_FL_STAT_ID_MU_NUM); @@ -4232,7 +4405,7 
@@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) goto free_stats_id; } - /* mask table */ + /* Mask table */ mask_hash_params.hash_func_init_val = priv->hash_seed; priv->mask_table = rte_hash_create(&mask_hash_params); if (priv->mask_table == NULL) { @@ -4241,7 +4414,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) goto free_stats; } - /* flow table */ + /* Flow table */ flow_hash_params.hash_func_init_val = priv->hash_seed; flow_hash_params.entries = ctx_count; priv->flow_table = rte_hash_create(&flow_hash_params); @@ -4251,7 +4424,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) goto free_mask_table; } - /* pre tunnel table */ + /* Pre tunnel table */ priv->pre_tun_cnt = 1; pre_tun_hash_params.hash_func_init_val = priv->hash_seed; priv->pre_tun_table = rte_hash_create(&pre_tun_hash_params); @@ -4261,19 +4434,42 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) goto free_flow_table; } - /* ipv4 off list */ + /* ct zone table */ + ct_zone_hash_params.hash_func_init_val = priv->hash_seed; + priv->ct_zone_table = rte_hash_create(&ct_zone_hash_params); + if (priv->ct_zone_table == NULL) { + PMD_INIT_LOG(ERR, "ct zone table creation failed"); + ret = -ENOMEM; + goto free_pre_tnl_table; + } + + /* ct map table */ + ct_map_hash_params.hash_func_init_val = priv->hash_seed; + ct_map_hash_params.entries = ctx_count; + priv->ct_map_table = rte_hash_create(&ct_map_hash_params); + if (priv->ct_map_table == NULL) { + PMD_INIT_LOG(ERR, "ct map table creation failed"); + ret = -ENOMEM; + goto free_ct_zone_table; + } + + /* IPv4 off list */ rte_spinlock_init(&priv->ipv4_off_lock); LIST_INIT(&priv->ipv4_off_list); - /* ipv6 off list */ + /* IPv6 off list */ rte_spinlock_init(&priv->ipv6_off_lock); LIST_INIT(&priv->ipv6_off_list); - /* neighbor next list */ + /* Neighbor next list */ LIST_INIT(&priv->nn_list); return 0; +free_ct_zone_table: + rte_hash_free(priv->ct_zone_table); +free_pre_tnl_table: + rte_hash_free(priv->pre_tun_table); free_flow_table: rte_hash_free(priv->flow_table); free_mask_table: @@ -4299,6 +4495,8 @@ nfp_flow_priv_uninit(struct nfp_pf_dev *pf_dev) app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv); priv = app_fw_flower->flow_priv; + rte_hash_free(priv->ct_map_table); + rte_hash_free(priv->ct_zone_table); rte_hash_free(priv->pre_tun_table); rte_hash_free(priv->flow_table); rte_hash_free(priv->mask_table); diff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h index 414bd4573b2..aeb24458f34 100644 --- a/drivers/net/nfp/nfp_flow.h +++ b/drivers/net/nfp/nfp_flow.h @@ -3,89 +3,24 @@ * All rights reserved. 
*/ -#ifndef _NFP_FLOW_H_ -#define _NFP_FLOW_H_ +#ifndef __NFP_FLOW_H__ +#define __NFP_FLOW_H__ -#include -#include -#include - -#define NFP_FLOWER_LAYER_EXT_META RTE_BIT32(0) -#define NFP_FLOWER_LAYER_PORT RTE_BIT32(1) -#define NFP_FLOWER_LAYER_MAC RTE_BIT32(2) -#define NFP_FLOWER_LAYER_TP RTE_BIT32(3) -#define NFP_FLOWER_LAYER_IPV4 RTE_BIT32(4) -#define NFP_FLOWER_LAYER_IPV6 RTE_BIT32(5) -#define NFP_FLOWER_LAYER_CT RTE_BIT32(6) -#define NFP_FLOWER_LAYER_VXLAN RTE_BIT32(7) - -#define NFP_FLOWER_LAYER2_GRE RTE_BIT32(0) -#define NFP_FLOWER_LAYER2_QINQ RTE_BIT32(4) -#define NFP_FLOWER_LAYER2_GENEVE RTE_BIT32(5) -#define NFP_FLOWER_LAYER2_GENEVE_OP RTE_BIT32(6) -#define NFP_FLOWER_LAYER2_TUN_IPV6 RTE_BIT32(7) - -/* Compressed HW representation of TCP Flags */ -#define NFP_FL_TCP_FLAG_FIN RTE_BIT32(0) -#define NFP_FL_TCP_FLAG_SYN RTE_BIT32(1) -#define NFP_FL_TCP_FLAG_RST RTE_BIT32(2) -#define NFP_FL_TCP_FLAG_PSH RTE_BIT32(3) -#define NFP_FL_TCP_FLAG_URG RTE_BIT32(4) - -#define NFP_FL_META_FLAG_MANAGE_MASK RTE_BIT32(7) - -#define NFP_FLOWER_MASK_VLAN_CFI RTE_BIT32(12) - -#define NFP_MASK_TABLE_ENTRIES 1024 - -/* The maximum action list size (in bytes) supported by the NFP. */ -#define NFP_FL_MAX_A_SIZ 1216 +#include "nfp_common.h" /* The firmware expects lengths in units of long words */ #define NFP_FL_LW_SIZ 2 -#define NFP_FL_SC_ACT_DROP 0x80000000 -#define NFP_FL_SC_ACT_USER 0x7D000000 -#define NFP_FL_SC_ACT_POPV 0x6A000000 -#define NFP_FL_SC_ACT_NULL 0x00000000 - -/* GRE Tunnel flags */ -#define NFP_FL_GRE_FLAG_KEY (1 << 2) - -/* Action opcodes */ -#define NFP_FL_ACTION_OPCODE_OUTPUT 0 -#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1 -#define NFP_FL_ACTION_OPCODE_POP_VLAN 2 -#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3 -#define NFP_FL_ACTION_OPCODE_POP_MPLS 4 -#define NFP_FL_ACTION_OPCODE_USERSPACE 5 -#define NFP_FL_ACTION_OPCODE_SET_TUNNEL 6 -#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7 -#define NFP_FL_ACTION_OPCODE_SET_MPLS 8 -#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9 -#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10 -#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11 -#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12 -#define NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL 13 -#define NFP_FL_ACTION_OPCODE_SET_UDP 14 -#define NFP_FL_ACTION_OPCODE_SET_TCP 15 -#define NFP_FL_ACTION_OPCODE_PRE_LAG 16 -#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17 -#define NFP_FL_ACTION_OPCODE_PRE_GS 18 -#define NFP_FL_ACTION_OPCODE_GS 19 -#define NFP_FL_ACTION_OPCODE_PUSH_NSH 20 -#define NFP_FL_ACTION_OPCODE_POP_NSH 21 -#define NFP_FL_ACTION_OPCODE_SET_QUEUE 22 -#define NFP_FL_ACTION_OPCODE_CONNTRACK 23 -#define NFP_FL_ACTION_OPCODE_METER 24 -#define NFP_FL_ACTION_OPCODE_CT_NAT_EXT 25 -#define NFP_FL_ACTION_OPCODE_PUSH_GENEVE 26 -#define NFP_FL_ACTION_OPCODE_NUM 32 - -#define NFP_FL_OUT_FLAGS_LAST RTE_BIT32(15) - -/* Tunnel ports */ -#define NFP_FL_PORT_TYPE_TUN 0x50000000 +/* + * Maximum number of items in struct rte_flow_action_vxlan_encap. + * ETH / IPv4(6) / UDP / VXLAN / END + */ +#define ACTION_VXLAN_ENCAP_ITEMS_NUM 5 + +struct vxlan_data { + struct rte_flow_action_vxlan_encap conf; + struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM]; +}; enum nfp_flower_tun_type { NFP_FL_TUN_NONE = 0, @@ -191,30 +126,41 @@ struct nfp_ipv6_addr_entry { struct nfp_flow_priv { uint32_t hash_seed; /**< Hash seed for hash tables in this structure. */ uint64_t flower_version; /**< Flow version, always increase. 
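The new struct vxlan_data mirrors the item list an application passes with RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: exactly ACTION_VXLAN_ENCAP_ITEMS_NUM entries, ETH / IPv4(6) / UDP / VXLAN / END. A hedged application-side sketch of such an action, with all header field values left as placeholders:

#include <rte_flow.h>

/*
 * Placeholder outer headers; a real application fills in the MAC and IP
 * addresses, the VXLAN UDP port (4789) and the VNI.
 */
static struct rte_flow_item_eth encap_eth;
static struct rte_flow_item_ipv4 encap_ipv4;
static struct rte_flow_item_udp encap_udp;
static struct rte_flow_item_vxlan encap_vxlan;

static struct rte_flow_item encap_items[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &encap_eth },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &encap_ipv4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &encap_udp },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &encap_vxlan },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

static struct rte_flow_action_vxlan_encap encap_conf = {
	.definition = encap_items,
};

static const struct rte_flow_action encap_action = {
	.type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
	.conf = &encap_conf,
};

When the PMD snapshots such an action it only needs room for these five items, which is what ACTION_VXLAN_ENCAP_ITEMS_NUM bounds.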
*/ - /* mask hash table */ + + /* Mask hash table */ struct nfp_fl_mask_id mask_ids; /**< Entry for mask hash table */ struct rte_hash *mask_table; /**< Hash table to store mask ids. */ - /* flow hash table */ + + /* Flow hash table */ struct rte_hash *flow_table; /**< Hash table to store flow rules. */ - /* flow stats */ + + /* Flow stats */ uint32_t active_mem_unit; /**< The size of active mem units. */ uint32_t total_mem_units; /**< The size of total mem units. */ uint32_t stats_ring_size; /**< The size of stats id ring. */ struct nfp_fl_stats_id stats_ids; /**< The stats id ring. */ struct nfp_fl_stats *stats; /**< Store stats of flow. */ rte_spinlock_t stats_lock; /** < Lock the update of 'stats' field. */ - /* pre tunnel rule */ + + /* Pre tunnel rule */ uint16_t pre_tun_cnt; /**< The size of pre tunnel rule */ uint8_t pre_tun_bitmap[NFP_TUN_PRE_TUN_RULE_LIMIT]; /**< Bitmap of pre tunnel rule */ struct rte_hash *pre_tun_table; /**< Hash table to store pre tunnel rule */ + /* IPv4 off */ LIST_HEAD(, nfp_ipv4_addr_entry) ipv4_off_list; /**< Store ipv4 off */ rte_spinlock_t ipv4_off_lock; /**< Lock the ipv4 off list */ + /* IPv6 off */ LIST_HEAD(, nfp_ipv6_addr_entry) ipv6_off_list; /**< Store ipv6 off */ rte_spinlock_t ipv6_off_lock; /**< Lock the ipv6 off list */ - /* neighbor next */ + + /* Neighbor next */ LIST_HEAD(, nfp_fl_tun)nn_list; /**< Store nn entry */ + /* Conntrack */ + struct rte_hash *ct_zone_table; /**< Hash table to store ct zone entry */ + struct nfp_ct_zone_entry *ct_zone_wc; /**< The wildcard ct zone entry */ + struct rte_hash *ct_map_table; /**< Hash table to store ct map entry */ }; struct rte_flow { @@ -226,11 +172,34 @@ struct rte_flow { uint32_t port_id; bool install_flag; bool tcp_flag; /**< Used in the SET_TP_* action */ + bool merge_flag; enum nfp_flow_type type; + uint16_t ref_cnt; }; +/* Forward declaration */ +struct nfp_flower_representor; + int nfp_flow_priv_init(struct nfp_pf_dev *pf_dev); void nfp_flow_priv_uninit(struct nfp_pf_dev *pf_dev); int nfp_net_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops); - -#endif /* _NFP_FLOW_H_ */ +bool nfp_flow_inner_item_get(const struct rte_flow_item items[], + const struct rte_flow_item **inner_item); +struct rte_flow *nfp_flow_process(struct nfp_flower_representor *representor, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + bool validate_flag, + uint64_t cookie, + bool install_flag, + bool merge_flag); +int nfp_flow_table_add_merge(struct nfp_flow_priv *priv, + struct rte_flow *nfp_flow); +int nfp_flow_teardown(struct nfp_flow_priv *priv, + struct rte_flow *nfp_flow, + bool validate_flag); +void nfp_flow_free(struct rte_flow *nfp_flow); +int nfp_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *nfp_flow, + struct rte_flow_error *error); + +#endif /* __NFP_FLOW_H__ */ diff --git a/drivers/net/nfp/nfp_ipsec.c b/drivers/net/nfp/nfp_ipsec.c new file mode 100644 index 00000000000..a76ba2a91d0 --- /dev/null +++ b/drivers/net/nfp/nfp_ipsec.c @@ -0,0 +1,1453 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2023 Corigine Systems, Inc. + * All rights reserved. 
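nfp_flow_process() is now exported with three extra parameters. The following is a hypothetical caller-side sketch showing only the call shape; the interpretation of install_flag and merge_flag is an assumption drawn from the flag names and the relaxed duplicate check in nfp_flow_process(), not a description of nfp_ct_flow_setup() itself:

#include "nfp_flow.h"

/* Hypothetical helper; names and flag semantics are illustrative. */
static struct rte_flow *
ct_submit_merged_flow(struct nfp_flower_representor *repr,
		const struct rte_flow_item items[],
		const struct rte_flow_action actions[],
		uint64_t ct_cookie)
{
	return nfp_flow_process(repr, items, actions,
			false,     /* validate_flag: create, not validate */
			ct_cookie, /* host cookie shared by the CT rules */
			false,     /* install_flag: defer the firmware add */
			true);     /* merge_flag: may coexist in the table */
}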
+ */ + +#include "nfp_ipsec.h" + +#include +#include +#include + +#include +#include + +#include "nfp_common.h" +#include "nfp_ctrl.h" +#include "nfp_logs.h" +#include "nfp_rxtx.h" + +#define NFP_UDP_ESP_PORT 4500 + +static const struct rte_cryptodev_capabilities nfp_crypto_caps[] = { + { + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + .sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + .auth = { + .algo = RTE_CRYPTO_AUTH_MD5_HMAC, + .block_size = 64, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .digest_size = { + .min = 12, + .max = 16, + .increment = 4 + }, + }, + }, + }, + { + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + .sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + .auth = { + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, + .block_size = 64, + .key_size = { + .min = 20, + .max = 64, + .increment = 1 + }, + .digest_size = { + .min = 10, + .max = 12, + .increment = 2 + }, + }, + }, + }, + { + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + .sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + .auth = { + .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, + .block_size = 64, + .key_size = { + .min = 32, + .max = 32, + .increment = 0 + }, + .digest_size = { + .min = 12, + .max = 16, + .increment = 4 + }, + }, + }, + }, + { + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + .sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + .auth = { + .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, + .block_size = 128, + .key_size = { + .min = 48, + .max = 48, + .increment = 0 + }, + .digest_size = { + .min = 12, + .max = 24, + .increment = 12 + }, + }, + }, + }, + { + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + .sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + .auth = { + .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, + .block_size = 128, + .key_size = { + .min = 64, + .max = 64, + .increment = 1 + }, + .digest_size = { + .min = 12, + .max = 32, + .increment = 4 + }, + }, + }, + }, + { + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + .sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + .cipher = { + .algo = RTE_CRYPTO_CIPHER_3DES_CBC, + .block_size = 8, + .key_size = { + .min = 24, + .max = 24, + .increment = 0 + }, + .iv_size = { + .min = 8, + .max = 16, + .increment = 8 + }, + }, + }, + }, + { + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + .sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + .cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .iv_size = { + .min = 8, + .max = 16, + .increment = 8 + }, + }, + }, + }, + { + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + .sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, + .aead = { + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .aad_size = { + .min = 0, + .max = 1024, + .increment = 1 + }, + .iv_size = { + .min = 8, + .max = 16, + .increment = 4 + } + }, + }, + }, + { + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + .sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, + .aead = { + .algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305, + .block_size = 16, + .key_size = { + .min = 32, + .max = 32, + .increment = 0 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .aad_size = { + .min = 0, + .max = 1024, + .increment = 1 + }, + .iv_size = { + .min = 8, + .max = 16, + .increment = 4 + } + }, + }, + }, + { + .op = RTE_CRYPTO_OP_TYPE_UNDEFINED, + .sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED + }, + } +}; + +static const struct rte_security_capability nfp_security_caps[] = { + { /* IPsec 
Inline Crypto Tunnel Egress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .options = { + .udp_encap = 1, + .stats = 1, + .esn = 1 + } + }, + .crypto_capabilities = nfp_crypto_caps + }, + { /* IPsec Inline Crypto Tunnel Ingress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .options = { + .udp_encap = 1, + .stats = 1, + .esn = 1 + } + }, + .crypto_capabilities = nfp_crypto_caps, + .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA + }, + { /* IPsec Inline Crypto Transport Egress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, + .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .options = { + .udp_encap = 1, + .stats = 1, + .esn = 1 + } + }, + .crypto_capabilities = nfp_crypto_caps + }, + { /* IPsec Inline Crypto Transport Ingress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, + .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .options = { + .udp_encap = 1, + .stats = 1, + .esn = 1 + } + }, + .crypto_capabilities = nfp_crypto_caps, + .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA + }, + { /* IPsec Inline Protocol Tunnel Egress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .options = { + .udp_encap = 1, + .stats = 1, + .esn = 1 + } + }, + .crypto_capabilities = nfp_crypto_caps + }, + { /* IPsec Inline Protocol Tunnel Ingress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .options = { + .udp_encap = 1, + .stats = 1, + .esn = 1 + } + }, + .crypto_capabilities = nfp_crypto_caps, + .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA + }, + { /* IPsec Inline Protocol Transport Egress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, + .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .options = { + .udp_encap = 1, + .stats = 1, + .esn = 1 + } + }, + .crypto_capabilities = nfp_crypto_caps + }, + { /* IPsec Inline Protocol Transport Ingress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, + .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .options = { + .udp_encap = 1, + .stats = 1, + .esn = 1 + } + }, + .crypto_capabilities = nfp_crypto_caps, + .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA + }, + { + .action = RTE_SECURITY_ACTION_TYPE_NONE + } +}; + +/* IPsec config message cmd codes */ +enum 
nfp_ipsec_cfg_msg_cmd_codes { + NFP_IPSEC_CFG_MSG_ADD_SA, /**< Add a new SA */ + NFP_IPSEC_CFG_MSG_INV_SA, /**< Invalidate an existing SA */ + NFP_IPSEC_CFG_MSG_MODIFY_SA, /**< Modify an existing SA */ + NFP_IPSEC_CFG_MSG_GET_SA_STATS, /**< Report SA counters, flags, etc. */ + NFP_IPSEC_CFG_MSG_GET_SEQ_NUMS, /**< Allocate sequence numbers */ + NFP_IPSEC_CFG_MSG_LAST +}; + +enum nfp_ipsec_cfg_msg_rsp_codes { + NFP_IPSEC_CFG_MSG_OK, + NFP_IPSEC_CFG_MSG_FAILED, + NFP_IPSEC_CFG_MSG_SA_VALID, + NFP_IPSEC_CFG_MSG_SA_HASH_ADD_FAILED, + NFP_IPSEC_CFG_MSG_SA_HASH_DEL_FAILED, + NFP_IPSEC_CFG_MSG_SA_INVALID_CMD +}; + +enum nfp_ipsec_mode { + NFP_IPSEC_MODE_TRANSPORT, + NFP_IPSEC_MODE_TUNNEL, +}; + +enum nfp_ipsec_protocol { + NFP_IPSEC_PROTOCOL_AH, + NFP_IPSEC_PROTOCOL_ESP, +}; + +/* Cipher modes */ +enum nfp_ipsec_cimode { + NFP_IPSEC_CIMODE_ECB, + NFP_IPSEC_CIMODE_CBC, + NFP_IPSEC_CIMODE_CFB, + NFP_IPSEC_CIMODE_OFB, + NFP_IPSEC_CIMODE_CTR, +}; + +/* Hash types */ +enum nfp_ipsec_hash_type { + NFP_IPSEC_HASH_NONE, + NFP_IPSEC_HASH_MD5_96, + NFP_IPSEC_HASH_SHA1_96, + NFP_IPSEC_HASH_SHA256_96, + NFP_IPSEC_HASH_SHA384_96, + NFP_IPSEC_HASH_SHA512_96, + NFP_IPSEC_HASH_MD5_128, + NFP_IPSEC_HASH_SHA1_80, + NFP_IPSEC_HASH_SHA256_128, + NFP_IPSEC_HASH_SHA384_192, + NFP_IPSEC_HASH_SHA512_256, + NFP_IPSEC_HASH_GF128_128, + NFP_IPSEC_HASH_POLY1305_128, +}; + +/* Cipher types */ +enum nfp_ipsec_cipher_type { + NFP_IPSEC_CIPHER_NULL, + NFP_IPSEC_CIPHER_3DES, + NFP_IPSEC_CIPHER_AES128, + NFP_IPSEC_CIPHER_AES192, + NFP_IPSEC_CIPHER_AES256, + NFP_IPSEC_CIPHER_AES128_NULL, + NFP_IPSEC_CIPHER_AES192_NULL, + NFP_IPSEC_CIPHER_AES256_NULL, + NFP_IPSEC_CIPHER_CHACHA20, +}; + +/* Don't Fragment types */ +enum nfp_ipsec_df_type { + NFP_IPSEC_DF_CLEAR, + NFP_IPSEC_DF_SET, + NFP_IPSEC_DF_COPY, +}; + +static int +nfp_ipsec_cfg_cmd_issue(struct nfp_net_hw *hw, + struct nfp_ipsec_msg *msg) +{ + int ret; + uint32_t i; + uint32_t msg_size; + + msg_size = RTE_DIM(msg->raw); + msg->rsp = NFP_IPSEC_CFG_MSG_OK; + + for (i = 0; i < msg_size; i++) + nn_cfg_writel(hw, NFP_NET_CFG_MBOX_VAL + 4 * i, msg->raw[i]); + + ret = nfp_net_mbox_reconfig(hw, NFP_NET_CFG_MBOX_CMD_IPSEC); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to IPsec reconfig mbox"); + return ret; + } + + /* + * Not all commands and callers make use of response message data. But + * leave this up to the caller and always read and store the full + * response. One example where the data is needed is for statistics. + */ + for (i = 0; i < msg_size; i++) + msg->raw[i] = nn_cfg_readl(hw, NFP_NET_CFG_MBOX_VAL + 4 * i); + + switch (msg->rsp) { + case NFP_IPSEC_CFG_MSG_OK: + ret = 0; + break; + case NFP_IPSEC_CFG_MSG_SA_INVALID_CMD: + ret = -EINVAL; + break; + case NFP_IPSEC_CFG_MSG_SA_VALID: + ret = -EEXIST; + break; + case NFP_IPSEC_CFG_MSG_FAILED: + /* FALLTHROUGH */ + case NFP_IPSEC_CFG_MSG_SA_HASH_ADD_FAILED: + /* FALLTHROUGH */ + case NFP_IPSEC_CFG_MSG_SA_HASH_DEL_FAILED: + ret = -EIO; + break; + default: + ret = -EDOM; + } + + return ret; +} + +/** + * Get valid SA index from SA table + * + * @param data + * SA table pointer + * @param sa_idx + * SA table index pointer + * + * @return + * Negative number on full or repeat, 0 on success + * + * Note: multiple sockets may create same SA session. 
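 *
 * Illustrative example: the caller (nfp_crypto_create_session) initializes
 * *sa_idx to -1; with sa_entries = { in-use, NULL, in-use, ... } the scan
 * stops at the first empty slot and *sa_idx becomes 1.  If every slot is
 * in use, *sa_idx keeps the caller's -1 and the session create fails.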
+ */ +static void +nfp_get_sa_entry(struct nfp_net_ipsec_data *data, + int *sa_idx) +{ + uint32_t i; + + for (i = 0; i < NFP_NET_IPSEC_MAX_SA_CNT; i++) { + if (data->sa_entries[i] == NULL) { + *sa_idx = i; + break; + } + } +} + +static void +nfp_aesgcm_iv_update(struct ipsec_add_sa *cfg, + uint16_t iv_len, + const char *iv_string) +{ + int i; + char *save; + char *iv_b; + char *iv_str; + uint8_t *cfg_iv; + + iv_str = strdup(iv_string); + cfg_iv = (uint8_t *)cfg->aesgcm_fields.iv; + + for (i = 0; i < iv_len; i++) { + iv_b = strtok_r(i ? NULL : iv_str, ",", &save); + if (iv_b == NULL) + break; + + cfg_iv[i] = strtoul(iv_b, NULL, 0); + } + + *(uint32_t *)cfg_iv = rte_be_to_cpu_32(*(uint32_t *)cfg_iv); + *(uint32_t *)&cfg_iv[4] = rte_be_to_cpu_32(*(uint32_t *)&cfg_iv[4]); + + free(iv_str); +} + +static int +set_aes_keylen(uint32_t key_length, + struct ipsec_add_sa *cfg) +{ + switch (key_length << 3) { + case 128: + cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_AES128; + break; + case 192: + cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_AES192; + break; + case 256: + cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_AES256; + break; + default: + PMD_DRV_LOG(ERR, "AES cipher key length is illegal!"); + return -EINVAL; + } + + return 0; +} + +/* Map rte_security_session_conf aead algo to NFP aead algo */ +static int +nfp_aead_map(struct rte_eth_dev *eth_dev, + struct rte_crypto_aead_xform *aead, + uint32_t key_length, + struct ipsec_add_sa *cfg) +{ + int ret; + uint32_t i; + uint32_t index; + uint16_t iv_len; + uint32_t offset; + uint32_t device_id; + const char *iv_str; + const uint32_t *key; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + device_id = hw->device_id; + offset = 0; + + switch (aead->algo) { + case RTE_CRYPTO_AEAD_AES_GCM: + if (aead->digest_length != 16) { + PMD_DRV_LOG(ERR, "ICV must be 128bit with RTE_CRYPTO_AEAD_AES_GCM!"); + return -EINVAL; + } + + cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CTR; + cfg->ctrl_word.hash = NFP_IPSEC_HASH_GF128_128; + + ret = set_aes_keylen(key_length, cfg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to set AES_GCM key length!"); + return -EINVAL; + } + + break; + case RTE_CRYPTO_AEAD_CHACHA20_POLY1305: + if (device_id != PCI_DEVICE_ID_NFP3800_PF_NIC) { + PMD_DRV_LOG(ERR, "Unsupported aead CHACHA20_POLY1305 algorithm!"); + return -EINVAL; + } + + if (aead->digest_length != 16) { + PMD_DRV_LOG(ERR, "ICV must be 128bit with RTE_CRYPTO_AEAD_CHACHA20_POLY1305"); + return -EINVAL; + } + + /* Aead->alg_key_len includes 32-bit salt */ + if (key_length != 32) { + PMD_DRV_LOG(ERR, "Unsupported CHACHA20 key length"); + return -EINVAL; + } + + /* The CHACHA20's mode is not configured */ + cfg->ctrl_word.hash = NFP_IPSEC_HASH_POLY1305_128; + cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_CHACHA20; + break; + default: + PMD_DRV_LOG(ERR, "Unsupported aead algorithm!"); + return -EINVAL; + } + + key = (const uint32_t *)(aead->key.data); + + /* + * The CHACHA20's key order needs to be adjusted based on hardware design. + * Unadjusted order: {K0, K1, K2, K3, K4, K5, K6, K7} + * Adjusted order: {K4, K5, K6, K7, K0, K1, K2, K3} + */ + if (aead->algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) + offset = key_length / sizeof(cfg->cipher_key[0]) << 1; + + for (i = 0; i < key_length / sizeof(cfg->cipher_key[0]); i++) { + index = (i + offset) % (key_length / sizeof(cfg->cipher_key[0])); + cfg->cipher_key[index] = rte_cpu_to_be_32(*key++); + } + + /* + * The iv of the FW is equal to ESN by default. 
Reading the + * iv of the configuration information is not supported. + */ + iv_str = getenv("ETH_SEC_IV_OVR"); + if (iv_str != NULL) { + iv_len = aead->iv.length; + nfp_aesgcm_iv_update(cfg, iv_len, iv_str); + } + + return 0; +} + +/* Map rte_security_session_conf cipher algo to NFP cipher algo */ +static int +nfp_cipher_map(struct rte_eth_dev *eth_dev, + struct rte_crypto_cipher_xform *cipher, + uint32_t key_length, + struct ipsec_add_sa *cfg) +{ + int ret; + uint32_t i; + uint32_t device_id; + const uint32_t *key; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + device_id = hw->device_id; + + switch (cipher->algo) { + case RTE_CRYPTO_CIPHER_NULL: + cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC; + cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_NULL; + break; + case RTE_CRYPTO_CIPHER_3DES_CBC: + if (device_id == PCI_DEVICE_ID_NFP3800_PF_NIC) { + PMD_DRV_LOG(ERR, "Unsupported 3DESCBC encryption algorithm!"); + return -EINVAL; + } + + cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC; + cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_3DES; + break; + case RTE_CRYPTO_CIPHER_AES_CBC: + cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC; + ret = set_aes_keylen(key_length, cfg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to set cipher key length!"); + return -EINVAL; + } + + break; + default: + PMD_DRV_LOG(ERR, "Unsupported cipher alg!"); + return -EINVAL; + } + + key = (const uint32_t *)(cipher->key.data); + if (key_length > sizeof(cfg->cipher_key)) { + PMD_DRV_LOG(ERR, "Insufficient space for offloaded key"); + return -EINVAL; + } + + for (i = 0; i < key_length / sizeof(cfg->cipher_key[0]); i++) + cfg->cipher_key[i] = rte_cpu_to_be_32(*key++); + + return 0; +} + +static void +set_md5hmac(struct ipsec_add_sa *cfg, + uint32_t *digest_length) +{ + switch (*digest_length) { + case 96: + cfg->ctrl_word.hash = NFP_IPSEC_HASH_MD5_96; + break; + case 128: + cfg->ctrl_word.hash = NFP_IPSEC_HASH_MD5_128; + break; + default: + *digest_length = 0; + } +} + +static void +set_sha1hmac(struct ipsec_add_sa *cfg, + uint32_t *digest_length) +{ + switch (*digest_length) { + case 96: + cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA1_96; + break; + case 80: + cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA1_80; + break; + default: + *digest_length = 0; + } +} + +static void +set_sha2_256hmac(struct ipsec_add_sa *cfg, + uint32_t *digest_length) +{ + switch (*digest_length) { + case 96: + cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA256_96; + break; + case 128: + cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA256_128; + break; + default: + *digest_length = 0; + } +} + +static void +set_sha2_384hmac(struct ipsec_add_sa *cfg, + uint32_t *digest_length) +{ + switch (*digest_length) { + case 96: + cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA384_96; + break; + case 192: + cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA384_192; + break; + default: + *digest_length = 0; + } +} + +static void +set_sha2_512hmac(struct ipsec_add_sa *cfg, + uint32_t *digest_length) +{ + switch (*digest_length) { + case 96: + cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA512_96; + break; + case 256: + cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA512_256; + break; + default: + *digest_length = 0; + } +} + +/* Map rte_security_session_conf auth algo to NFP auth algo */ +static int +nfp_auth_map(struct rte_eth_dev *eth_dev, + struct rte_crypto_auth_xform *auth, + uint32_t digest_length, + struct ipsec_add_sa *cfg) +{ + uint32_t i; + uint8_t key_length; + uint32_t device_id; + const uint32_t *key; + struct nfp_net_hw *hw; + + if (digest_length == 0) { + PMD_DRV_LOG(ERR, "Auth 
digest length is illegal!"); + return -EINVAL; + } + + hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + device_id = hw->device_id; + digest_length = digest_length << 3; + + switch (auth->algo) { + case RTE_CRYPTO_AUTH_NULL: + cfg->ctrl_word.hash = NFP_IPSEC_HASH_NONE; + digest_length = 1; + break; + case RTE_CRYPTO_AUTH_MD5_HMAC: + if (device_id == PCI_DEVICE_ID_NFP3800_PF_NIC) { + PMD_DRV_LOG(ERR, "Unsupported MD5HMAC authentication algorithm!"); + return -EINVAL; + } + + set_md5hmac(cfg, &digest_length); + break; + case RTE_CRYPTO_AUTH_SHA1_HMAC: + set_sha1hmac(cfg, &digest_length); + break; + case RTE_CRYPTO_AUTH_SHA256_HMAC: + set_sha2_256hmac(cfg, &digest_length); + break; + case RTE_CRYPTO_AUTH_SHA384_HMAC: + set_sha2_384hmac(cfg, &digest_length); + break; + case RTE_CRYPTO_AUTH_SHA512_HMAC: + set_sha2_512hmac(cfg, &digest_length); + break; + default: + PMD_DRV_LOG(ERR, "Unsupported auth alg!"); + return -EINVAL; + } + + if (digest_length == 0) { + PMD_DRV_LOG(ERR, "Unsupported authentication algorithm digest length"); + return -EINVAL; + } + + key = (const uint32_t *)(auth->key.data); + key_length = auth->key.length; + if (key_length > sizeof(cfg->auth_key)) { + PMD_DRV_LOG(ERR, "Insufficient space for offloaded auth key!"); + return -EINVAL; + } + + for (i = 0; i < key_length / sizeof(cfg->auth_key[0]); i++) + cfg->auth_key[i] = rte_cpu_to_be_32(*key++); + + return 0; +} + +static int +nfp_crypto_msg_build(struct rte_eth_dev *eth_dev, + struct rte_security_session_conf *conf, + struct nfp_ipsec_msg *msg) +{ + int ret; + struct ipsec_add_sa *cfg; + struct rte_crypto_sym_xform *cur; + struct rte_crypto_sym_xform *next; + enum rte_security_ipsec_sa_direction direction; + + cur = conf->crypto_xform; + if (cur == NULL) { + PMD_DRV_LOG(ERR, "Unsupported crypto_xform is NULL!"); + return -EINVAL; + } + + next = cur->next; + direction = conf->ipsec.direction; + cfg = &msg->cfg_add_sa; + + switch (cur->type) { + case RTE_CRYPTO_SYM_XFORM_AEAD: + /* Aead transforms can be used for either inbound/outbound IPsec SAs */ + if (next != NULL) { + PMD_DRV_LOG(ERR, "Next crypto_xform type should be NULL!"); + return -EINVAL; + } + + ret = nfp_aead_map(eth_dev, &cur->aead, cur->aead.key.length, cfg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to map aead alg!"); + return ret; + } + + cfg->aesgcm_fields.salt = rte_cpu_to_be_32(conf->ipsec.salt); + break; + case RTE_CRYPTO_SYM_XFORM_AUTH: + /* Only support Auth + Cipher for inbound */ + if (direction != RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { + PMD_DRV_LOG(ERR, "Direction should be INGRESS, but it is not!"); + return -EINVAL; + } + + if (next == NULL || next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) { + PMD_DRV_LOG(ERR, "Next crypto_xfrm should be cipher, but it is not!"); + return -EINVAL; + } + + ret = nfp_auth_map(eth_dev, &cur->auth, cur->auth.digest_length, cfg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to map auth alg!"); + return ret; + } + + ret = nfp_cipher_map(eth_dev, &next->cipher, next->cipher.key.length, cfg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to map cipher alg!"); + return ret; + } + + break; + case RTE_CRYPTO_SYM_XFORM_CIPHER: + /* Only support Cipher + Auth for outbound */ + if (direction != RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { + PMD_DRV_LOG(ERR, "Direction should be EGRESS, but it is not!"); + return -EINVAL; + } + + if (next == NULL || next->type != RTE_CRYPTO_SYM_XFORM_AUTH) { + PMD_DRV_LOG(ERR, "Next crypto_xfrm should be auth, but it is not!"); + return -EINVAL; + } + + ret = nfp_cipher_map(eth_dev, 
&cur->cipher, cur->cipher.key.length, cfg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to map cipher alg!"); + return ret; + } + + ret = nfp_auth_map(eth_dev, &next->auth, next->auth.digest_length, cfg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to map auth alg!"); + return ret; + } + + break; + default: + PMD_DRV_LOG(ERR, "Unsupported crypto_xform type!"); + return -EINVAL; + } + + return 0; +} + +static int +nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev, + struct rte_security_session_conf *conf, + struct nfp_ipsec_msg *msg) +{ + int ret; + struct ipsec_add_sa *cfg; + enum rte_security_ipsec_tunnel_type type; + + cfg = &msg->cfg_add_sa; + cfg->spi = conf->ipsec.spi; + cfg->pmtu_limit = 0xffff; + + /* + * UDP encapsulation + * + * 1: Do UDP encapsulation/decapsulation + * 0: No UDP encapsulation + */ + if (conf->ipsec.options.udp_encap == 1) { + cfg->udp_enable = 1; + cfg->natt_dst_port = NFP_UDP_ESP_PORT; + cfg->natt_src_port = NFP_UDP_ESP_PORT; + } + + if (conf->ipsec.options.copy_df == 1) + cfg->df_ctrl = NFP_IPSEC_DF_COPY; + else if (conf->ipsec.tunnel.ipv4.df != 0) + cfg->df_ctrl = NFP_IPSEC_DF_SET; + else + cfg->df_ctrl = NFP_IPSEC_DF_CLEAR; + + switch (conf->action_type) { + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO: + cfg->ctrl_word.encap_dsbl = 1; + break; + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL: + cfg->ctrl_word.encap_dsbl = 0; + break; + default: + PMD_DRV_LOG(ERR, "Unsupported IPsec action for offload, action: %d", + conf->action_type); + return -EINVAL; + } + + switch (conf->ipsec.proto) { + case RTE_SECURITY_IPSEC_SA_PROTO_ESP: + cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_ESP; + break; + case RTE_SECURITY_IPSEC_SA_PROTO_AH: + cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_AH; + break; + default: + PMD_DRV_LOG(ERR, "Unsupported IPsec protocol for offload, protocol: %d", + conf->ipsec.proto); + return -EINVAL; + } + + switch (conf->ipsec.mode) { + case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL: + type = conf->ipsec.tunnel.type; + cfg->ctrl_word.mode = NFP_IPSEC_MODE_TUNNEL; + if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { + cfg->src_ip.v4 = conf->ipsec.tunnel.ipv4.src_ip; + cfg->dst_ip.v4 = conf->ipsec.tunnel.ipv4.dst_ip; + cfg->ipv6 = 0; + } else if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV6) { + cfg->src_ip.v6 = conf->ipsec.tunnel.ipv6.src_addr; + cfg->dst_ip.v6 = conf->ipsec.tunnel.ipv6.dst_addr; + cfg->ipv6 = 1; + } else { + PMD_DRV_LOG(ERR, "Unsupported address family!"); + return -EINVAL; + } + + break; + case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT: + type = conf->ipsec.tunnel.type; + cfg->ctrl_word.mode = NFP_IPSEC_MODE_TRANSPORT; + if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { + memset(&cfg->src_ip, 0, sizeof(cfg->src_ip)); + cfg->ipv6 = 0; + } else if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV6) { + memset(&cfg->src_ip, 0, sizeof(cfg->src_ip)); + cfg->ipv6 = 1; + } else { + PMD_DRV_LOG(ERR, "Unsupported address family!"); + return -EINVAL; + } + + break; + default: + PMD_DRV_LOG(ERR, "Unsupported IPsec mode for offload, mode: %d", + conf->ipsec.mode); + return -EINVAL; + } + + ret = nfp_crypto_msg_build(eth_dev, conf, msg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to build auth/crypto/aead msg!"); + return ret; + } + + return 0; +} + +static int +nfp_crypto_create_session(void *device, + struct rte_security_session_conf *conf, + struct rte_security_session *session) +{ + int ret; + int sa_idx; + struct nfp_net_hw *hw; + struct nfp_ipsec_msg msg; + struct rte_eth_dev *eth_dev; + struct nfp_ipsec_session *priv_session; + + /* Only support IPsec at present */ + if 
(conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) { + PMD_DRV_LOG(ERR, "Unsupported non-IPsec offload!"); + return -EINVAL; + } + + sa_idx = -1; + eth_dev = device; + priv_session = SECURITY_GET_SESS_PRIV(session); + hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + if (hw->ipsec_data->sa_free_cnt == 0) { + PMD_DRV_LOG(ERR, "No space in SA table, spi: %d", conf->ipsec.spi); + return -EINVAL; + } + + nfp_get_sa_entry(hw->ipsec_data, &sa_idx); + + if (sa_idx < 0) { + PMD_DRV_LOG(ERR, "Failed to get SA entry!"); + return -EINVAL; + } + + memset(&msg, 0, sizeof(msg)); + ret = nfp_ipsec_msg_build(eth_dev, conf, &msg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to build IPsec msg!"); + return -EINVAL; + } + + msg.cmd = NFP_IPSEC_CFG_MSG_ADD_SA; + msg.sa_idx = sa_idx; + ret = nfp_ipsec_cfg_cmd_issue(hw, &msg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to add SA to nic"); + return -EINVAL; + } + + priv_session->action = conf->action_type; + priv_session->ipsec = conf->ipsec; + priv_session->msg = msg.cfg_add_sa; + priv_session->sa_index = sa_idx; + priv_session->dev = eth_dev; + priv_session->user_data = conf->userdata; + + hw->ipsec_data->sa_free_cnt--; + hw->ipsec_data->sa_entries[sa_idx] = priv_session; + + return 0; +} + +static int +nfp_crypto_update_session(void *device __rte_unused, + struct rte_security_session *session, + struct rte_security_session_conf *conf) +{ + struct nfp_ipsec_session *priv_session; + + priv_session = SECURITY_GET_SESS_PRIV(session); + if (priv_session == NULL) + return -EINVAL; + + /* Update IPsec ESN value */ + if (priv_session->msg.ctrl_word.ext_seq != 0 && conf->ipsec.options.esn != 0) { + /* + * Store in nfp_ipsec_session for outbound SA for use + * in nfp_security_set_pkt_metadata() function. + */ + priv_session->ipsec.esn.hi = conf->ipsec.esn.hi; + priv_session->ipsec.esn.low = conf->ipsec.esn.low; + } + + return 0; +} + +static int +nfp_security_set_pkt_metadata(void *device, + struct rte_security_session *session, + struct rte_mbuf *m, + void *params) +{ + int offset; + uint64_t *sqn; + struct nfp_net_hw *hw; + struct rte_eth_dev *eth_dev; + struct nfp_ipsec_session *priv_session; + + sqn = params; + eth_dev = device; + priv_session = SECURITY_GET_SESS_PRIV(session); + hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + if (priv_session->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { + struct nfp_tx_ipsec_desc_msg *desc_md; + + offset = hw->ipsec_data->pkt_dynfield_offset; + desc_md = RTE_MBUF_DYNFIELD(m, offset, struct nfp_tx_ipsec_desc_msg *); + + if (priv_session->msg.ctrl_word.ext_seq != 0 && sqn != NULL) { + desc_md->esn.low = rte_cpu_to_be_32(*sqn); + desc_md->esn.hi = rte_cpu_to_be_32(*sqn >> 32); + } else if (priv_session->msg.ctrl_word.ext_seq != 0) { + desc_md->esn.low = rte_cpu_to_be_32(priv_session->ipsec.esn.low); + desc_md->esn.hi = rte_cpu_to_be_32(priv_session->ipsec.esn.hi); + } else { + desc_md->esn.low = rte_cpu_to_be_32(priv_session->ipsec.esn.value); + desc_md->esn.hi = 0; + } + + desc_md->enc = 1; + desc_md->sa_idx = rte_cpu_to_be_32(priv_session->sa_index); + } + + return 0; +} + +/** + * Get discards packet statistics for each SA + * + * The sa_discard_stats contains the statistics of discards packets + * of an SA. This function calculates the sum total of discarded packets. 
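 *
 * Worked example (illustrative): with discards_auth = 3,
 * discards_pmtu_exceeded = 2, ipv4_id_counter = 7 and every other counter
 * zero, the loop accumulates 3 + 2 + 7 = 12 and the final subtraction of
 * the (non-error) ipv4_id_counter leaves *errors increased by 5.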
+ * + * @param errors + * The value is SA discards packet sum total + * @param sa_discard_stats + * The struct is SA discards packet Statistics + */ +static void +nfp_get_errorstats(uint64_t *errors, + struct ipsec_discard_stats *sa_discard_stats) +{ + uint32_t i; + uint32_t len; + uint32_t *perror; + + perror = &sa_discard_stats->discards_auth; + len = sizeof(struct ipsec_discard_stats) / sizeof(uint32_t); + + for (i = 0; i < len; i++) + *errors += *perror++; + + *errors -= sa_discard_stats->ipv4_id_counter; +} + +static int +nfp_security_session_get_stats(void *device, + struct rte_security_session *session, + struct rte_security_stats *stats) +{ + int ret; + struct nfp_net_hw *hw; + struct nfp_ipsec_msg msg; + struct rte_eth_dev *eth_dev; + struct ipsec_get_sa_stats *cfg_s; + struct rte_security_ipsec_stats *ips_s; + struct nfp_ipsec_session *priv_session; + enum rte_security_ipsec_sa_direction direction; + + eth_dev = device; + priv_session = SECURITY_GET_SESS_PRIV(session); + memset(&msg, 0, sizeof(msg)); + msg.cmd = NFP_IPSEC_CFG_MSG_GET_SA_STATS; + msg.sa_idx = priv_session->sa_index; + hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + ret = nfp_ipsec_cfg_cmd_issue(hw, &msg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to get SA stats"); + return ret; + } + + cfg_s = &msg.cfg_get_stats; + direction = priv_session->ipsec.direction; + memset(stats, 0, sizeof(struct rte_security_stats)); /* Start with zeros */ + stats->protocol = RTE_SECURITY_PROTOCOL_IPSEC; + ips_s = &stats->ipsec; + + /* Only display SA if any counters are non-zero */ + if (cfg_s->lifetime_byte_count != 0 || cfg_s->pkt_count != 0) { + if (direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { + ips_s->ipackets = cfg_s->pkt_count; + ips_s->ibytes = cfg_s->lifetime_byte_count; + nfp_get_errorstats(&ips_s->ierrors, &cfg_s->sa_discard_stats); + } else { + ips_s->opackets = cfg_s->pkt_count; + ips_s->obytes = cfg_s->lifetime_byte_count; + nfp_get_errorstats(&ips_s->oerrors, &cfg_s->sa_discard_stats); + } + } + + return 0; +} + +static const struct rte_security_capability * +nfp_crypto_capabilities_get(void *device __rte_unused) +{ + return nfp_security_caps; +} + +static uint32_t +nfp_security_session_get_size(void *device __rte_unused) +{ + return sizeof(struct nfp_ipsec_session); +} + +static int +nfp_crypto_remove_sa(struct rte_eth_dev *eth_dev, + struct nfp_ipsec_session *priv_session) +{ + int ret; + uint32_t sa_index; + struct nfp_net_hw *hw; + struct nfp_ipsec_msg cfg; + + sa_index = priv_session->sa_index; + hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + cfg.cmd = NFP_IPSEC_CFG_MSG_INV_SA; + cfg.sa_idx = sa_index; + ret = nfp_ipsec_cfg_cmd_issue(hw, &cfg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to remove SA!"); + return -EINVAL; + } + + hw->ipsec_data->sa_free_cnt++; + hw->ipsec_data->sa_entries[sa_index] = NULL; + + return 0; +} + +static int +nfp_crypto_remove_session(void *device, + struct rte_security_session *session) +{ + int ret; + struct rte_eth_dev *eth_dev; + struct nfp_ipsec_session *priv_session; + + eth_dev = device; + priv_session = SECURITY_GET_SESS_PRIV(session); + if (eth_dev != priv_session->dev) { + PMD_DRV_LOG(ERR, "Session not bound to this device"); + return -ENODEV; + } + + ret = nfp_crypto_remove_sa(eth_dev, priv_session); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to remove session"); + return -EFAULT; + } + + memset(priv_session, 0, sizeof(struct nfp_ipsec_session)); + + return 0; +} + +static const struct rte_security_ops nfp_security_ops = { + 
.session_create = nfp_crypto_create_session, + .session_update = nfp_crypto_update_session, + .session_get_size = nfp_security_session_get_size, + .session_stats_get = nfp_security_session_get_stats, + .session_destroy = nfp_crypto_remove_session, + .set_pkt_metadata = nfp_security_set_pkt_metadata, + .capabilities_get = nfp_crypto_capabilities_get, +}; + +static int +nfp_ipsec_ctx_create(struct rte_eth_dev *dev, + struct nfp_net_ipsec_data *data) +{ + struct rte_security_ctx *ctx; + static const struct rte_mbuf_dynfield pkt_md_dynfield = { + .name = "nfp_ipsec_crypto_pkt_metadata", + .size = sizeof(struct nfp_tx_ipsec_desc_msg), + .align = __alignof__(struct nfp_tx_ipsec_desc_msg), + }; + + ctx = rte_zmalloc("security_ctx", + sizeof(struct rte_security_ctx), 0); + if (ctx == NULL) { + PMD_INIT_LOG(ERR, "Failed to malloc security_ctx"); + return -ENOMEM; + } + + ctx->device = dev; + ctx->ops = &nfp_security_ops; + ctx->sess_cnt = 0; + dev->security_ctx = ctx; + + data->pkt_dynfield_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield); + if (data->pkt_dynfield_offset < 0) { + PMD_INIT_LOG(ERR, "Failed to register mbuf esn_dynfield"); + return -ENOMEM; + } + + return 0; +} + +int +nfp_ipsec_init(struct rte_eth_dev *dev) +{ + int ret; + uint32_t cap_extend; + struct nfp_net_hw *hw; + struct nfp_net_ipsec_data *data; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + cap_extend = hw->cap_ext; + if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) == 0) { + PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability"); + return 0; + } + + data = rte_zmalloc("ipsec_data", sizeof(struct nfp_net_ipsec_data), 0); + if (data == NULL) { + PMD_INIT_LOG(ERR, "Failed to malloc ipsec_data"); + return -ENOMEM; + } + + data->pkt_dynfield_offset = -1; + data->sa_free_cnt = NFP_NET_IPSEC_MAX_SA_CNT; + hw->ipsec_data = data; + + ret = nfp_ipsec_ctx_create(dev, data); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to create IPsec ctx"); + goto ipsec_cleanup; + } + + return 0; + +ipsec_cleanup: + nfp_ipsec_uninit(dev); + + return ret; +} + +static void +nfp_ipsec_ctx_destroy(struct rte_eth_dev *dev) +{ + if (dev->security_ctx != NULL) + rte_free(dev->security_ctx); +} + +void +nfp_ipsec_uninit(struct rte_eth_dev *dev) +{ + uint16_t i; + uint32_t cap_extend; + struct nfp_net_hw *hw; + struct nfp_ipsec_session *priv_session; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + cap_extend = hw->cap_ext; + if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) == 0) { + PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability"); + return; + } + + nfp_ipsec_ctx_destroy(dev); + + if (hw->ipsec_data == NULL) { + PMD_INIT_LOG(INFO, "IPsec data is NULL!"); + return; + } + + for (i = 0; i < NFP_NET_IPSEC_MAX_SA_CNT; i++) { + priv_session = hw->ipsec_data->sa_entries[i]; + if (priv_session != NULL) + memset(priv_session, 0, sizeof(struct nfp_ipsec_session)); + } + + rte_free(hw->ipsec_data); +} + diff --git a/drivers/net/nfp/nfp_ipsec.h b/drivers/net/nfp/nfp_ipsec.h new file mode 100644 index 00000000000..d7a729398a2 --- /dev/null +++ b/drivers/net/nfp/nfp_ipsec.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2023 Corigine Systems, Inc. + * All rights reserved. 
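For reference, this is the shape of the rte_security_session_conf that nfp_ipsec_msg_build() and nfp_crypto_msg_build() consume for an inline-protocol ESP tunnel with AES-128-GCM. A hedged application-side sketch; the key, SPI, salt and tunnel endpoints are placeholders, and byte-order handling of the addresses is left to the application:

#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_crypto_sym.h>
#include <rte_ip.h>
#include <rte_security.h>

static uint8_t aes_gcm_key[16];   /* AES-128 key, placeholder contents */

static struct rte_crypto_sym_xform aead_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = aes_gcm_key, .length = sizeof(aes_gcm_key) },
		.iv = { .offset = 0, .length = 12 },
		.digest_length = 16,   /* the PMD requires a 128-bit ICV */
	},
};

static const struct rte_security_session_conf ipsec_conf = {
	.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
	.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
	.ipsec = {
		.spi = 1000,               /* placeholder SPI */
		.salt = 0x12345678,        /* copied into aesgcm_fields.salt */
		.options = { .udp_encap = 0, .esn = 0 },
		.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
		.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
		.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
		.tunnel = {
			.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
			.ipv4 = {
				.src_ip = { .s_addr = RTE_BE32(RTE_IPV4(192, 0, 2, 1)) },
				.dst_ip = { .s_addr = RTE_BE32(RTE_IPV4(192, 0, 2, 2)) },
			},
		},
	},
	.crypto_xform = &aead_xform,
};

A session built from such a configuration is created against the port's security context (registered by nfp_ipsec_ctx_create() above) through the standard rte_security session API.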
+ */ + +#ifndef __NFP_IPSEC_H__ +#define __NFP_IPSEC_H__ + +#include + +#define NFP_NET_IPSEC_MAX_SA_CNT (16 * 1024) + +struct ipsec_aesgcm { /**< AES-GCM-ESP fields */ + uint32_t salt; /**< Initialized with SA */ + uint32_t iv[2]; /**< Firmware use only */ + uint32_t cntrl; + uint32_t zeros[4]; /**< Init to 0 with SA */ + uint32_t len_auth[2]; /**< Firmware use only */ + uint32_t len_cipher[2]; + uint32_t spare[4]; +}; + +struct sa_ctrl_word { + uint32_t hash :4; /**< From nfp_ipsec_hash_type */ + uint32_t cimode :4; /**< From nfp_ipsec_cipher_mode */ + uint32_t cipher :4; /**< From nfp_ipsec_cipher */ + uint32_t mode :2; /**< From nfp_ipsec_mode */ + uint32_t proto :2; /**< From nfp_ipsec_prot */ + uint32_t spare :1; /**< Should be 0 */ + uint32_t ena_arw:1; /**< Anti-Replay Window */ + uint32_t ext_seq:1; /**< 64-bit Sequence Num */ + uint32_t ext_arw:1; /**< 64b Anti-Replay Window */ + uint32_t spare1 :9; /**< Must be set to 0 */ + uint32_t encap_dsbl:1; /**< Encap/decap disable */ + uint32_t gen_seq:1; /**< Firmware Generate Seq #'s */ + uint32_t spare2 :1; /**< Must be set to 0 */ +}; + +union nfp_ip_addr { + struct in6_addr v6; + struct in_addr v4; +}; + +struct ipsec_add_sa { + uint32_t cipher_key[8]; /**< Cipher Key */ + union { + uint32_t auth_key[16]; /**< Authentication Key */ + struct ipsec_aesgcm aesgcm_fields; + }; + struct sa_ctrl_word ctrl_word; + uint32_t spi; /**< SPI Value */ + uint16_t pmtu_limit; /**< PMTU Limit */ + uint32_t spare :1; + uint32_t frag_check :1; /**< Stateful fragment checking flag */ + uint32_t bypass_DSCP:1; /**< Bypass DSCP Flag */ + uint32_t df_ctrl :2; /**< DF Control bits */ + uint32_t ipv6 :1; /**< Outbound IPv6 addr format */ + uint32_t udp_enable :1; /**< Add/Remove UDP header for NAT */ + uint32_t tfc_enable :1; /**< Traffic Flw Confidentiality */ + uint8_t spare1; + uint32_t soft_byte_cnt; /**< Soft lifetime byte count */ + uint32_t hard_byte_cnt; /**< Hard lifetime byte count */ + union nfp_ip_addr src_ip; /**< Src IP addr */ + union nfp_ip_addr dst_ip; /**< Dst IP addr */ + uint16_t natt_dst_port; /**< NAT-T UDP Header dst port */ + uint16_t natt_src_port; /**< NAT-T UDP Header src port */ + uint32_t soft_lifetime_limit; /**< Soft lifetime time limit */ + uint32_t hard_lifetime_limit; /**< Hard lifetime time limit */ + uint32_t sa_time_lo; /**< SA creation time lower 32bits, Ucode fills this in */ + uint32_t sa_time_hi; /**< SA creation time high 32bits, Ucode fills this in */ + uint16_t spare2; + uint16_t tfc_padding; /**< Traffic Flow Confidential Pad */ +}; + +struct ipsec_inv_sa { + uint32_t spare; +}; + +struct ipsec_discard_stats { + uint32_t discards_auth; /**< Auth failures */ + uint32_t discards_unsupported; /**< Unsupported crypto mode */ + uint32_t discards_alignment; /**< Alignment error */ + uint32_t discards_hard_bytelimit; /**< Hard byte Count limit */ + uint32_t discards_seq_num_wrap; /**< Sequ Number wrap */ + uint32_t discards_pmtu_exceeded; /**< PMTU Limit exceeded */ + uint32_t discards_arw_old_seq; /**< Anti-Replay seq small */ + uint32_t discards_arw_replay; /**< Anti-Replay seq rcvd */ + uint32_t discards_ctrl_word; /**< Bad SA Control word */ + uint32_t discards_ip_hdr_len; /**< Hdr offset from too high */ + uint32_t discards_eop_buf; /**< No EOP buffer */ + uint32_t ipv4_id_counter; /**< IPv4 ID field counter */ + uint32_t discards_isl_fail; /**< Inbound SPD Lookup failure */ + uint32_t discards_ext_unfound; /**< Ext header end */ + uint32_t discards_max_ext_hdrs; /**< Max ext header */ + uint32_t 
discards_non_ext_hdrs; /**< Non-extension headers */ + uint32_t discards_ext_hdr_too_big; /**< Ext header chain */ + uint32_t discards_hard_timelimit; /**< Time Limit */ +}; + +struct ipsec_get_sa_stats { + uint32_t seq_lo; /**< Sequence Number (low 32bits) */ + uint32_t seq_high; /**< Sequence Number (high 32bits) */ + uint32_t arw_counter_lo; /**< Anti-replay wndw cntr */ + uint32_t arw_counter_high; /**< Anti-replay wndw cntr */ + uint32_t arw_bitmap_lo; /**< Anti-replay wndw bitmap */ + uint32_t arw_bitmap_high; /**< Anti-replay wndw bitmap */ + uint32_t spare:1; + uint32_t soft_byte_exceeded :1; /**< Soft lifetime byte cnt exceeded */ + uint32_t hard_byte_exceeded :1; /**< Hard lifetime byte cnt exceeded */ + uint32_t soft_time_exceeded :1; /**< Soft lifetime time limit exceeded */ + uint32_t hard_time_exceeded :1; /**< Hard lifetime time limit exceeded */ + uint32_t spare1:27; + uint32_t lifetime_byte_count; + uint32_t pkt_count; + struct ipsec_discard_stats sa_discard_stats; +}; + +struct ipsec_get_seq { + uint32_t seq_nums; /**< Sequence numbers to allocate */ + uint32_t seq_num_low; /**< Return start seq num 31:00 */ + uint32_t seq_num_hi; /**< Return start seq num 63:32 */ +}; + +struct nfp_ipsec_msg { + union { + struct { + /** NFP IPsec SA cmd message codes */ + uint16_t cmd; + /** NFP IPsec SA response message */ + uint16_t rsp; + /** NFP IPsec SA index in driver SA table */ + uint16_t sa_idx; + /** Reserved */ + uint16_t spare; + union { + /** IPsec configure message for add SA */ + struct ipsec_add_sa cfg_add_sa; + /** IPsec configure message for del SA */ + struct ipsec_inv_sa cfg_inv_sa; + /** IPsec configure message for get SA stats */ + struct ipsec_get_sa_stats cfg_get_stats; + /** IPsec configure message for get SA seq numbers */ + struct ipsec_get_seq cfg_get_seq; + }; + }; + uint32_t raw[64]; + }; +}; + +struct nfp_ipsec_session { + /** Opaque user defined data */ + void *user_data; + /** NFP sa_entries database parameter index */ + uint32_t sa_index; + /** Point to physical ports ethernet device */ + struct rte_eth_dev *dev; + /** SA related NPF configuration data */ + struct ipsec_add_sa msg; + /** Security association configuration data */ + struct rte_security_ipsec_xform ipsec; + /** Security session action type */ + enum rte_security_session_action_type action; +} __rte_cache_aligned; + +struct nfp_net_ipsec_data { + int pkt_dynfield_offset; + uint32_t sa_free_cnt; + struct nfp_ipsec_session *sa_entries[NFP_NET_IPSEC_MAX_SA_CNT]; +}; + +enum nfp_ipsec_meta_layer { + NFP_IPSEC_META_SAIDX, /**< Order of SA index in metadata */ + NFP_IPSEC_META_SEQLOW, /**< Order of Sequence Number (low 32bits) in metadata */ + NFP_IPSEC_META_SEQHI, /**< Order of Sequence Number (high 32bits) in metadata */ +}; + +int nfp_ipsec_init(struct rte_eth_dev *dev); +void nfp_ipsec_uninit(struct rte_eth_dev *dev); + +#endif /* __NFP_IPSEC_H__ */ diff --git a/drivers/net/nfp/nfp_logs.h b/drivers/net/nfp/nfp_logs.h index 315a57811cd..690adabffd8 100644 --- a/drivers/net/nfp/nfp_logs.h +++ b/drivers/net/nfp/nfp_logs.h @@ -3,8 +3,8 @@ * All rights reserved. */ -#ifndef _NFP_LOGS_H_ -#define _NFP_LOGS_H_ +#ifndef __NFP_LOGS_H__ +#define __NFP_LOGS_H__ #include @@ -12,7 +12,6 @@ extern int nfp_logtype_init; #define PMD_INIT_LOG(level, fmt, args...) 
\ rte_log(RTE_LOG_ ## level, nfp_logtype_init, \ "%s(): " fmt "\n", __func__, ## args) -#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") #ifdef RTE_ETHDEV_DEBUG_RX extern int nfp_logtype_rx; @@ -42,4 +41,4 @@ extern int nfp_logtype_driver; rte_log(RTE_LOG_ ## level, nfp_logtype_driver, \ "%s(): " fmt "\n", __func__, ## args) -#endif /* _NFP_LOGS_H_ */ +#endif /* __NFP_LOGS_H__ */ diff --git a/drivers/net/nfp/nfp_mtr.c b/drivers/net/nfp/nfp_mtr.c index afc4de4cc7c..255977ec223 100644 --- a/drivers/net/nfp/nfp_mtr.c +++ b/drivers/net/nfp/nfp_mtr.c @@ -3,17 +3,17 @@ * All rights reserved. */ -#include -#include -#include +#include "nfp_mtr.h" + #include +#include +#include -#include "nfp_common.h" -#include "nfp_mtr.h" -#include "nfp_logs.h" -#include "flower/nfp_flower.h" -#include "flower/nfp_flower_cmsg.h" #include "flower/nfp_flower_representor.h" +#include "nfp_logs.h" + +#define NFP_MAX_POLICY_CNT NFP_MAX_MTR_CNT +#define NFP_MAX_PROFILE_CNT NFP_MAX_MTR_CNT #define NFP_FL_QOS_PPS RTE_BIT32(15) #define NFP_FL_QOS_METER RTE_BIT32(10) diff --git a/drivers/net/nfp/nfp_mtr.h b/drivers/net/nfp/nfp_mtr.h index f5406381ab8..7ab0324721f 100644 --- a/drivers/net/nfp/nfp_mtr.h +++ b/drivers/net/nfp/nfp_mtr.h @@ -8,79 +8,13 @@ #include +#include "flower/nfp_flower_cmsg.h" + /** * The max meter count is determined by firmware. * The max count is 65536 defined by OF_METER_COUNT. */ #define NFP_MAX_MTR_CNT 65536 -#define NFP_MAX_POLICY_CNT NFP_MAX_MTR_CNT -#define NFP_MAX_PROFILE_CNT NFP_MAX_MTR_CNT - -/** - * See RFC 2698 for more details. - * Word[0](Flag options): - * [15] p(pps) 1 for pps, 0 for bps - * - * Meter control message - * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - * +-------------------------------+-+---+-----+-+---------+-+---+-+ - * | Reserved |p| Y |TYPE |E| TSHFV |P| PC|R| - * +-------------------------------+-+---+-----+-+---------+-+---+-+ - * | Profile ID | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | Token Bucket Peak | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | Token Bucket Committed | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | Peak Burst Size | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | Committed Burst Size | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | Peak Information Rate | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | Committed Information Rate | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - */ -struct nfp_cfg_head { - rte_be32_t flags_opts; - rte_be32_t profile_id; -}; - -/** - * Struct nfp_profile_conf - profile config, offload to NIC - * @head: config head information - * @bkt_tkn_p: token bucket peak - * @bkt_tkn_c: token bucket committed - * @pbs: peak burst size - * @cbs: committed burst size - * @pir: peak information rate - * @cir: committed information rate - */ -struct nfp_profile_conf { - struct nfp_cfg_head head; - rte_be32_t bkt_tkn_p; - rte_be32_t bkt_tkn_c; - rte_be32_t pbs; - rte_be32_t cbs; - rte_be32_t pir; - rte_be32_t cir; -}; - -/** - * Struct nfp_mtr_stats_reply - meter stats, read from firmware - * @head: config head information - * @pass_bytes: count of passed bytes - * @pass_pkts: count of passed packets - * @drop_bytes: count of dropped bytes - * @drop_pkts: count of dropped packets - */ -struct nfp_mtr_stats_reply { - struct nfp_cfg_head head; - rte_be64_t pass_bytes; - rte_be64_t pass_pkts; - 
rte_be64_t drop_bytes; - rte_be64_t drop_pkts; -}; /** * Struct nfp_mtr_profile - meter profile, stored in driver diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c index f0c371ea2b4..efdca7fccfc 100644 --- a/drivers/net/nfp/nfp_rxtx.c +++ b/drivers/net/nfp/nfp_rxtx.c @@ -5,28 +5,174 @@ * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. */ -#include +#include "nfp_rxtx.h" + #include +#include -#include "nfp_common.h" -#include "nfp_ctrl.h" -#include "nfp_rxtx.h" -#include "nfp_logs.h" #include "nfd3/nfp_nfd3.h" #include "nfdk/nfp_nfdk.h" -#include "nfpcore/nfp_mip.h" -#include "nfpcore/nfp_rtsym.h" #include "flower/nfp_flower.h" +#include "nfp_ipsec.h" +#include "nfp_logs.h" + +/* Maximum number of supported VLANs in parsed form packet metadata. */ +#define NFP_META_MAX_VLANS 2 + +/* Record metadata parsed from packet */ +struct nfp_meta_parsed { + uint32_t port_id; /**< Port id value */ + uint32_t sa_idx; /**< IPsec SA index */ + uint32_t hash; /**< RSS hash value */ + uint8_t hash_type; /**< RSS hash type */ + uint8_t ipsec_type; /**< IPsec type */ + uint8_t vlan_layer; /**< The valid number of value in @vlan[] */ + /** + * Holds information parses from NFP_NET_META_VLAN. + * The inner most vlan starts at position 0 + */ + struct { + uint8_t offload; /**< Flag indicates whether VLAN is offloaded */ + uint8_t tpid; /**< Vlan TPID */ + uint16_t tci; /**< Vlan TCI (PCP + Priority + VID) */ + } vlan[NFP_META_MAX_VLANS]; +}; + +/* + * The bit format and map of nfp packet type for rxd.offload_info in Rx descriptor. + * + * Bit format about nfp packet type refers to the following: + * --------------------------------- + * 1 0 + * 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | |ol3|tunnel | l3 | l4 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Bit map about nfp packet type refers to the following: + * + * L4: bit 0~2, used for layer 4 or inner layer 4. + * 000: NFP_NET_PTYPE_L4_NONE + * 001: NFP_NET_PTYPE_L4_TCP + * 010: NFP_NET_PTYPE_L4_UDP + * 011: NFP_NET_PTYPE_L4_FRAG + * 100: NFP_NET_PTYPE_L4_NONFRAG + * 101: NFP_NET_PTYPE_L4_ICMP + * 110: NFP_NET_PTYPE_L4_SCTP + * 111: reserved + * + * L3: bit 3~5, used for layer 3 or inner layer 3. + * 000: NFP_NET_PTYPE_L3_NONE + * 001: NFP_NET_PTYPE_L3_IPV6 + * 010: NFP_NET_PTYPE_L3_IPV4 + * 011: NFP_NET_PTYPE_L3_IPV4_EXT + * 100: NFP_NET_PTYPE_L3_IPV6_EXT + * 101: NFP_NET_PTYPE_L3_IPV4_EXT_UNKNOWN + * 110: NFP_NET_PTYPE_L3_IPV6_EXT_UNKNOWN + * 111: reserved + * + * Tunnel: bit 6~9, used for tunnel. + * 0000: NFP_NET_PTYPE_TUNNEL_NONE + * 0001: NFP_NET_PTYPE_TUNNEL_VXLAN + * 0100: NFP_NET_PTYPE_TUNNEL_NVGRE + * 0101: NFP_NET_PTYPE_TUNNEL_GENEVE + * 0010, 0011, 0110~1111: reserved + * + * Outer L3: bit 10~11, used for outer layer 3. + * 00: NFP_NET_PTYPE_OUTER_L3_NONE + * 01: NFP_NET_PTYPE_OUTER_L3_IPV6 + * 10: NFP_NET_PTYPE_OUTER_L3_IPV4 + * 11: reserved + * + * Reserved: bit 10~15, used for extension. + */ + +/* Mask and offset about nfp packet type based on the bit map above. */ +#define NFP_NET_PTYPE_L4_MASK 0x0007 +#define NFP_NET_PTYPE_L3_MASK 0x0038 +#define NFP_NET_PTYPE_TUNNEL_MASK 0x03c0 +#define NFP_NET_PTYPE_OUTER_L3_MASK 0x0c00 + +#define NFP_NET_PTYPE_L4_OFFSET 0 +#define NFP_NET_PTYPE_L3_OFFSET 3 +#define NFP_NET_PTYPE_TUNNEL_OFFSET 6 +#define NFP_NET_PTYPE_OUTER_L3_OFFSET 10 + +/* Case about nfp packet type based on the bit map above. 
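For illustration, the mask/offset pairs above are enough to split the 16-bit offload_info word into its four sub-fields; the case values those fields select are listed immediately below. A minimal decode sketch (the helper name is hypothetical, everything else follows the definitions in this hunk):

static inline void
nfp_ptype_split_sketch(uint16_t offload_info)
{
	uint8_t l4 = (offload_info & NFP_NET_PTYPE_L4_MASK) >> NFP_NET_PTYPE_L4_OFFSET;
	uint8_t l3 = (offload_info & NFP_NET_PTYPE_L3_MASK) >> NFP_NET_PTYPE_L3_OFFSET;
	uint8_t tunnel = (offload_info & NFP_NET_PTYPE_TUNNEL_MASK) >> NFP_NET_PTYPE_TUNNEL_OFFSET;
	uint8_t outer_l3 = (offload_info & NFP_NET_PTYPE_OUTER_L3_MASK) >> NFP_NET_PTYPE_OUTER_L3_OFFSET;

	/* The extracted values match the NFP_NET_PTYPE_* cases below,
	 * e.g. l4 == 1 means NFP_NET_PTYPE_L4_TCP, and NFP_PTYPE2RTE()
	 * then selects the inner or outer RTE_PTYPE_* flag.
	 */
	RTE_SET_USED(l4);
	RTE_SET_USED(l3);
	RTE_SET_USED(tunnel);
	RTE_SET_USED(outer_l3);
}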
*/ +#define NFP_NET_PTYPE_L4_NONE 0 +#define NFP_NET_PTYPE_L4_TCP 1 +#define NFP_NET_PTYPE_L4_UDP 2 +#define NFP_NET_PTYPE_L4_FRAG 3 +#define NFP_NET_PTYPE_L4_NONFRAG 4 +#define NFP_NET_PTYPE_L4_ICMP 5 +#define NFP_NET_PTYPE_L4_SCTP 6 + +#define NFP_NET_PTYPE_L3_NONE 0 +#define NFP_NET_PTYPE_L3_IPV6 1 +#define NFP_NET_PTYPE_L3_IPV4 2 +#define NFP_NET_PTYPE_L3_IPV4_EXT 3 +#define NFP_NET_PTYPE_L3_IPV6_EXT 4 +#define NFP_NET_PTYPE_L3_IPV4_EXT_UNKNOWN 5 +#define NFP_NET_PTYPE_L3_IPV6_EXT_UNKNOWN 6 + +#define NFP_NET_PTYPE_TUNNEL_NONE 0 +#define NFP_NET_PTYPE_TUNNEL_VXLAN 1 +#define NFP_NET_PTYPE_TUNNEL_NVGRE 4 +#define NFP_NET_PTYPE_TUNNEL_GENEVE 5 + +#define NFP_NET_PTYPE_OUTER_L3_NONE 0 +#define NFP_NET_PTYPE_OUTER_L3_IPV6 1 +#define NFP_NET_PTYPE_OUTER_L3_IPV4 2 + +#define NFP_PTYPE2RTE(tunnel, type) ((tunnel) ? RTE_PTYPE_INNER_##type : RTE_PTYPE_##type) + +/* Record NFP packet type parsed from rxd.offload_info. */ +struct nfp_ptype_parsed { + uint8_t l4_ptype; /**< Packet type of layer 4, or inner layer 4. */ + uint8_t l3_ptype; /**< Packet type of layer 3, or inner layer 3. */ + uint8_t tunnel_ptype; /**< Packet type of tunnel. */ + uint8_t outer_l3_ptype; /**< Packet type of outer layer 3. */ +}; + +/* Set mbuf checksum flags based on RX descriptor flags */ +void +nfp_net_rx_cksum(struct nfp_net_rxq *rxq, + struct nfp_net_rx_desc *rxd, + struct rte_mbuf *mb) +{ + struct nfp_net_hw *hw = rxq->hw; + + if ((hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM) == 0) + return; + + /* If IPv4 and IP checksum error, fail */ + if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) != 0 && + (rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK) == 0)) + mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; + else + mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; + + /* If neither UDP nor TCP return */ + if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) == 0 && + (rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) == 0) + return; + + if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK) != 0) + mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; + else + mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; +} + static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) { - struct nfp_net_dp_buf *rxe = rxq->rxbufs; + uint16_t i; uint64_t dma_addr; - unsigned int i; + struct nfp_net_dp_buf *rxe = rxq->rxbufs; - PMD_RX_LOG(DEBUG, "Fill Rx Freelist for %u descriptors", - rxq->rx_count); + PMD_RX_LOG(DEBUG, "Fill Rx Freelist for %hu descriptors", + rxq->rx_count); for (i = 0; i < rxq->rx_count; i++) { struct nfp_net_rx_desc *rxd; @@ -44,16 +190,15 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) rxd->fld.dd = 0; rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xffff; rxd->fld.dma_addr_lo = dma_addr & 0xffffffff; + rxe[i].mbuf = mbuf; - PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr); } /* Make sure all writes are flushed before telling the hardware */ rte_wmb(); /* Not advertising the whole ring as the firmware gets confused if so */ - PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u", - rxq->rx_count - 1); + PMD_RX_LOG(DEBUG, "Increment FL write pointer in %hu", rxq->rx_count - 1); nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1); @@ -63,37 +208,34 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) int nfp_net_rx_freelist_setup(struct rte_eth_dev *dev) { - int i; + uint16_t i; for (i = 0; i < dev->data->nb_rx_queues; i++) { - if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0) + if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) != 0) return -1; } + return 0; } uint32_t nfp_net_rx_queue_count(void *rx_queue) { + uint32_t idx; + uint32_t 
count = 0; struct nfp_net_rxq *rxq; struct nfp_net_rx_desc *rxds; - uint32_t idx; - uint32_t count; rxq = rx_queue; - idx = rxq->rd_p; - count = 0; - /* * Other PMDs are just checking the DD bit in intervals of 4 * descriptors and counting all four if the first has the DD * bit on. Of course, this is not accurate but can be good for * performance. But ideally that should be done in descriptors - * chunks belonging to the same cache line + * chunks belonging to the same cache line. */ - while (count < rxq->rx_count) { rxds = &rxq->rxds[idx]; if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0) @@ -102,7 +244,7 @@ nfp_net_rx_queue_count(void *rx_queue) count++; idx++; - /* Wrapping? */ + /* Wrapping */ if ((idx) == rxq->rx_count) idx = 0; } @@ -110,15 +252,15 @@ nfp_net_rx_queue_count(void *rx_queue) return count; } -/* nfp_net_parse_chained_meta() - Parse the chained metadata from packet */ +/* Parse the chained metadata from packet */ static bool nfp_net_parse_chained_meta(uint8_t *meta_base, rte_be32_t meta_header, struct nfp_meta_parsed *meta) { - uint8_t *meta_offset; uint32_t meta_info; uint32_t vlan_info; + uint8_t *meta_offset; meta_info = rte_be_to_cpu_32(meta_header); meta_offset = meta_base + 4; @@ -142,7 +284,11 @@ nfp_net_parse_chained_meta(uint8_t *meta_base, meta->vlan[meta->vlan_layer].tci = vlan_info & NFP_NET_META_VLAN_MASK; meta->vlan[meta->vlan_layer].tpid = NFP_NET_META_TPID(vlan_info); - ++meta->vlan_layer; + meta->vlan_layer++; + break; + case NFP_NET_META_IPSEC: + meta->sa_idx = rte_be_to_cpu_32(*(rte_be32_t *)meta_offset); + meta->ipsec_type = meta_info & NFP_NET_META_FIELD_MASK; break; default: /* Unsupported metadata can be a performance issue */ @@ -153,12 +299,7 @@ nfp_net_parse_chained_meta(uint8_t *meta_base, return true; } -/* - * nfp_net_parse_meta_hash() - Set mbuf hash data based on the metadata info - * - * The RSS hash and hash-type are prepended to the packet data. - * Extract and decode it and set the mbuf fields. - */ +/* Set mbuf hash data based on the metadata info */ static void nfp_net_parse_meta_hash(const struct nfp_meta_parsed *meta, struct nfp_net_rxq *rxq, @@ -174,7 +315,7 @@ nfp_net_parse_meta_hash(const struct nfp_meta_parsed *meta, } /* - * nfp_net_parse_single_meta() - Parse the single metadata + * Parse the single metadata * * The RSS hash and hash-type are prepended to the packet data. * Get it from metadata area. @@ -188,12 +329,7 @@ nfp_net_parse_single_meta(uint8_t *meta_base, meta->hash = rte_be_to_cpu_32(*(rte_be32_t *)(meta_base + 4)); } -/* - * nfp_net_parse_meta_vlan() - Set mbuf vlan_strip data based on metadata info - * - * The VLAN info TPID and TCI are prepended to the packet data. - * Extract and decode it and set the mbuf fields. - */ +/* Set mbuf vlan_strip data based on metadata info */ static void nfp_net_parse_meta_vlan(const struct nfp_meta_parsed *meta, struct nfp_net_rx_desc *rxd, @@ -202,22 +338,17 @@ nfp_net_parse_meta_vlan(const struct nfp_meta_parsed *meta, { struct nfp_net_hw *hw = rxq->hw; - /* Skip if hardware don't support setting vlan. */ + /* Skip if firmware don't support setting vlan. */ if ((hw->ctrl & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) == 0) return; /* - * The nic support the two way to send the VLAN info, - * 1. According the metadata to send the VLAN info when NFP_NET_CFG_CTRL_RXVLAN_V2 - * is set - * 2. According the descriptor to sned the VLAN info when NFP_NET_CFG_CTRL_RXVLAN - * is set - * - * If the nic doesn't send the VLAN info, it is not necessary - * to do anything. 
+ * The firmware support two ways to send the VLAN info (with priority) : + * 1. Using the metadata when NFP_NET_CFG_CTRL_RXVLAN_V2 is set, + * 2. Using the descriptor when NFP_NET_CFG_CTRL_RXVLAN is set. */ if ((hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0) { - if (meta->vlan_layer >= 1 && meta->vlan[0].offload != 0) { + if (meta->vlan_layer > 0 && meta->vlan[0].offload != 0) { mb->vlan_tci = rte_cpu_to_le_32(meta->vlan[0].tci); mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; } @@ -230,7 +361,7 @@ nfp_net_parse_meta_vlan(const struct nfp_meta_parsed *meta, } /* - * nfp_net_parse_meta_qinq() - Set mbuf qinq_strip data based on metadata info + * Set mbuf qinq_strip data based on metadata info * * The out VLAN tci are prepended to the packet data. * Extract and decode it and set the mbuf fields. @@ -263,13 +394,47 @@ nfp_net_parse_meta_qinq(const struct nfp_meta_parsed *meta, if (meta->vlan[0].offload == 0) mb->vlan_tci = rte_cpu_to_le_16(meta->vlan[0].tci); + mb->vlan_tci_outer = rte_cpu_to_le_16(meta->vlan[1].tci); - PMD_RX_LOG(DEBUG, "Received outer vlan is %u inter vlan is %u", + PMD_RX_LOG(DEBUG, "Received outer vlan TCI is %u inner vlan TCI is %u", mb->vlan_tci_outer, mb->vlan_tci); mb->ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED; } -/* nfp_net_parse_meta() - Parse the metadata from packet */ +/* + * Set mbuf IPsec Offload features based on metadata info. + * + * The IPsec Offload features is prepended to the mbuf ol_flags. + * Extract and decode metadata info and set the mbuf ol_flags. + */ +static void +nfp_net_parse_meta_ipsec(struct nfp_meta_parsed *meta, + struct nfp_net_rxq *rxq, + struct rte_mbuf *mbuf) +{ + int offset; + uint32_t sa_idx; + struct nfp_net_hw *hw; + struct nfp_tx_ipsec_desc_msg *desc_md; + + hw = rxq->hw; + sa_idx = meta->sa_idx; + + if (meta->ipsec_type != NFP_NET_META_IPSEC) + return; + + if (sa_idx >= NFP_NET_IPSEC_MAX_SA_CNT) { + mbuf->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED; + } else { + mbuf->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD; + offset = hw->ipsec_data->pkt_dynfield_offset; + desc_md = RTE_MBUF_DYNFIELD(mbuf, offset, struct nfp_tx_ipsec_desc_msg *); + desc_md->sa_idx = sa_idx; + desc_md->enc = 0; + } +} + +/* Parse the metadata from packet */ static void nfp_net_parse_meta(struct nfp_net_rx_desc *rxds, struct nfp_net_rxq *rxq, @@ -293,6 +458,7 @@ nfp_net_parse_meta(struct nfp_net_rx_desc *rxds, nfp_net_parse_meta_hash(meta, rxq, mb); nfp_net_parse_meta_vlan(meta, rxds, rxq, mb); nfp_net_parse_meta_qinq(meta, rxq, mb); + nfp_net_parse_meta_ipsec(meta, rxq, mb); } else { PMD_RX_LOG(DEBUG, "RX chained metadata format is wrong!"); } @@ -317,7 +483,8 @@ nfp_net_parse_meta(struct nfp_net_rx_desc *rxds, * Mbuf to set the packet type. */ static void -nfp_net_set_ptype(const struct nfp_ptype_parsed *nfp_ptype, struct rte_mbuf *mb) +nfp_net_set_ptype(const struct nfp_ptype_parsed *nfp_ptype, + struct rte_mbuf *mb) { uint32_t mbuf_ptype = RTE_PTYPE_L2_ETHER; uint8_t nfp_tunnel_ptype = nfp_ptype->tunnel_ptype; @@ -470,28 +637,29 @@ nfp_net_parse_ptype(struct nfp_net_rx_desc *rxds, * doing now have any benefit at all. Again, tests with this change have not * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing * so looking at the implications of this type of allocation should be studied - * deeply + * deeply. 
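For context on the comment above: rte_pktmbuf_alloc_bulk() (a thin wrapper around rte_mempool_get_bulk()) either fills the whole array or takes nothing from the pool, which is the all-or-nothing behaviour being weighed. A rough sketch of what a bulk refill could look like, purely as illustration and not a change made by this patch:

static int
nfp_net_bulk_refill_sketch(struct nfp_net_rxq *rxq, uint16_t burst)
{
	uint16_t i;
	struct rte_mbuf *mbufs[64];

	if (burst > RTE_DIM(mbufs))
		burst = RTE_DIM(mbufs);

	/* All-or-nothing: on failure no mbuf was taken from the pool. */
	if (rte_pktmbuf_alloc_bulk(rxq->mem_pool, mbufs, burst) != 0) {
		nfp_net_mbuf_alloc_failed(rxq);
		return -ENOMEM;
	}

	for (i = 0; i < burst; i++) {
		/* Write the DMA address of mbufs[i] into free-list descriptor i. */
	}

	return 0;
}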
*/ - uint16_t -nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +nfp_net_recv_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) { - struct nfp_net_rxq *rxq; - struct nfp_net_rx_desc *rxds; - struct nfp_net_dp_buf *rxb; - struct nfp_net_hw *hw; + uint64_t dma_addr; + uint16_t avail = 0; struct rte_mbuf *mb; + uint16_t nb_hold = 0; + struct nfp_net_hw *hw; struct rte_mbuf *new_mb; - uint16_t nb_hold; - uint64_t dma_addr; - uint16_t avail; + struct nfp_net_rxq *rxq; + struct nfp_net_dp_buf *rxb; + struct nfp_net_rx_desc *rxds; uint16_t avail_multiplexed = 0; rxq = rx_queue; if (unlikely(rxq == NULL)) { /* * DPDK just checks the queue is lower than max queues - * enabled. But the queue needs to be configured + * enabled. But the queue needs to be configured. */ PMD_RX_LOG(ERR, "RX Bad queue"); return 0; @@ -499,8 +667,6 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) hw = rxq->hw; - avail = 0; - nb_hold = 0; while (avail + avail_multiplexed < nb_pkts) { rxb = &rxq->rxbufs[rxq->rd_p]; if (unlikely(rxb == NULL)) { @@ -520,12 +686,11 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) /* * We got a packet. Let's alloc a new mbuf for refilling the - * free descriptor ring as soon as possible + * free descriptor ring as soon as possible. */ new_mb = rte_pktmbuf_alloc(rxq->mem_pool); if (unlikely(new_mb == NULL)) { - PMD_RX_LOG(DEBUG, - "RX mbuf alloc failed port_id=%u queue_id=%hu", + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%hu", rxq->port_id, rxq->qidx); nfp_net_mbuf_alloc_failed(rxq); break; @@ -533,45 +698,35 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) /* * Grab the mbuf and refill the descriptor with the - * previously allocated mbuf + * previously allocated mbuf. */ mb = rxb->mbuf; rxb->mbuf = new_mb; PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u", - rxds->rxd.data_len, rxq->mbuf_size); + rxds->rxd.data_len, rxq->mbuf_size); /* Size of this segment */ mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds); /* Size of the whole packet. We just support 1 segment */ mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds); - if (unlikely((mb->data_len + hw->rx_offset) > - rxq->mbuf_size)) { + if (unlikely((mb->data_len + hw->rx_offset) > rxq->mbuf_size)) { /* * This should not happen and the user has the * responsibility of avoiding it. But we have - * to give some info about the error + * to give some info about the error. 
*/ - PMD_RX_LOG(ERR, - "mbuf overflow likely due to the RX offset.\n" - "\t\tYour mbuf size should have extra space for" - " RX offset=%u bytes.\n" - "\t\tCurrently you just have %u bytes available" - " but the received packet is %u bytes long", - hw->rx_offset, - rxq->mbuf_size - hw->rx_offset, - mb->data_len); + PMD_RX_LOG(ERR, "mbuf overflow likely due to the RX offset."); rte_pktmbuf_free(mb); break; } /* Filling the received mbuf with packet info */ - if (hw->rx_offset) + if (hw->rx_offset != 0) mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset; else - mb->data_off = RTE_PKTMBUF_HEADROOM + - NFP_DESC_META_LEN(rxds); + mb->data_off = RTE_PKTMBUF_HEADROOM + NFP_DESC_META_LEN(rxds); /* No scatter mode supported */ mb->nb_segs = 1; @@ -605,7 +760,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) nb_hold++; rxq->rd_p++; - if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/ + if (unlikely(rxq->rd_p == rxq->rx_count)) /* Wrapping */ rxq->rd_p = 0; } @@ -613,18 +768,18 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) return nb_hold; PMD_RX_LOG(DEBUG, "RX port_id=%hu queue_id=%hu, %hu packets received", - rxq->port_id, rxq->qidx, avail); + rxq->port_id, rxq->qidx, avail); nb_hold += rxq->nb_rx_hold; /* * FL descriptors needs to be written before incrementing the - * FL queue WR pointer + * FL queue WR pointer. */ rte_wmb(); if (nb_hold > rxq->rx_free_thresh) { PMD_RX_LOG(DEBUG, "port=%hu queue=%hu nb_hold=%hu avail=%hu", - rxq->port_id, rxq->qidx, nb_hold, avail); + rxq->port_id, rxq->qidx, nb_hold, avail); nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold); nb_hold = 0; } @@ -636,13 +791,13 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) static void nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq) { - unsigned int i; + uint16_t i; if (rxq->rxbufs == NULL) return; for (i = 0; i < rxq->rx_count; i++) { - if (rxq->rxbufs[i].mbuf) { + if (rxq->rxbufs[i].mbuf != NULL) { rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf); rxq->rxbufs[i].mbuf = NULL; } @@ -650,11 +805,12 @@ nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq) } void -nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx) +nfp_net_rx_queue_release(struct rte_eth_dev *dev, + uint16_t queue_idx) { struct nfp_net_rxq *rxq = dev->data->rx_queues[queue_idx]; - if (rxq) { + if (rxq != NULL) { nfp_net_rx_queue_release_mbufs(rxq); rte_eth_dma_zone_free(dev, "rx_ring", queue_idx); rte_free(rxq->rxbufs); @@ -672,47 +828,43 @@ nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq) int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, - uint16_t queue_idx, uint16_t nb_desc, - unsigned int socket_id, - const struct rte_eth_rxconf *rx_conf, - struct rte_mempool *mp) + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) { - int ret; + uint32_t rx_desc_sz; uint16_t min_rx_desc; uint16_t max_rx_desc; - const struct rte_memzone *tz; - struct nfp_net_rxq *rxq; struct nfp_net_hw *hw; - uint32_t rx_desc_sz; + struct nfp_net_rxq *rxq; + const struct rte_memzone *tz; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - PMD_INIT_FUNC_TRACE(); - - ret = nfp_net_rx_desc_limits(hw, &min_rx_desc, &max_rx_desc); - if (ret != 0) - return ret; + nfp_net_rx_desc_limits(hw, &min_rx_desc, &max_rx_desc); /* Validating number of descriptors */ rx_desc_sz = nb_desc * sizeof(struct nfp_net_rx_desc); if (rx_desc_sz % NFP_ALIGN_RING_DESC != 0 || - nb_desc > max_rx_desc 
|| nb_desc < min_rx_desc) { + nb_desc > max_rx_desc || nb_desc < min_rx_desc) { PMD_DRV_LOG(ERR, "Wrong nb_desc value"); return -EINVAL; } /* * Free memory prior to re-allocation if needed. This is the case after - * calling nfp_net_stop + * calling @nfp_net_stop(). */ - if (dev->data->rx_queues[queue_idx]) { + if (dev->data->rx_queues[queue_idx] != NULL) { nfp_net_rx_queue_release(dev, queue_idx); dev->data->rx_queues[queue_idx] = NULL; } /* Allocating rx queue data structure */ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq), - RTE_CACHE_LINE_SIZE, socket_id); + RTE_CACHE_LINE_SIZE, socket_id); if (rxq == NULL) return -ENOMEM; @@ -725,7 +877,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, /* * Tracking mbuf size for detecting a potential mbuf overflow due to - * RX offset + * RX offset. */ rxq->mem_pool = mp; rxq->mbuf_size = rxq->mem_pool->elt_size; @@ -742,10 +894,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, * resizing in later calls to the queue setup function. */ tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, - sizeof(struct nfp_net_rx_desc) * - max_rx_desc, NFP_MEMZONE_ALIGN, - socket_id); - + sizeof(struct nfp_net_rx_desc) * max_rx_desc, + NFP_MEMZONE_ALIGN, socket_id); if (tz == NULL) { PMD_DRV_LOG(ERR, "Error allocating rx dma"); nfp_net_rx_queue_release(dev, queue_idx); @@ -757,26 +907,23 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, rxq->dma = (uint64_t)tz->iova; rxq->rxds = tz->addr; - /* mbuf pointers array for referencing mbufs linked to RX descriptors */ + /* Mbuf pointers array for referencing mbufs linked to RX descriptors */ rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs", - sizeof(*rxq->rxbufs) * nb_desc, - RTE_CACHE_LINE_SIZE, socket_id); + sizeof(*rxq->rxbufs) * nb_desc, RTE_CACHE_LINE_SIZE, + socket_id); if (rxq->rxbufs == NULL) { nfp_net_rx_queue_release(dev, queue_idx); dev->data->rx_queues[queue_idx] = NULL; return -ENOMEM; } - PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64, - rxq->rxbufs, rxq->rxds, (unsigned long)rxq->dma); - nfp_net_reset_rx_queue(rxq); rxq->hw = hw; /* * Telling the HW about the physical address of the RX ring and number - * of descriptors in log2 format + * of descriptors in log2 format. */ nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma); nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc)); @@ -784,28 +931,31 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, return 0; } -/* - * nfp_net_tx_free_bufs - Check for descriptors with a complete - * status - * @txq: TX queue to work with - * Returns number of descriptors freed +/** + * Check for descriptors with a complete status + * + * @param txq + * TX queue to work with + * + * @return + * Number of descriptors freed */ -int +uint32_t nfp_net_tx_free_bufs(struct nfp_net_txq *txq) { + uint32_t todo; uint32_t qcp_rd_p; - int todo; PMD_TX_LOG(DEBUG, "queue %hu. 
Check for descriptor with a complete" - " status", txq->qidx); + " status", txq->qidx); /* Work out how many packets have been sent */ qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR); if (qcp_rd_p == txq->rd_p) { PMD_TX_LOG(DEBUG, "queue %hu: It seems harrier is not sending " - "packets (%u, %u)", txq->qidx, - qcp_rd_p, txq->rd_p); + "packets (%u, %u)", txq->qidx, + qcp_rd_p, txq->rd_p); return 0; } @@ -815,7 +965,7 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq) todo = qcp_rd_p + txq->tx_count - txq->rd_p; PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u", - qcp_rd_p, txq->rd_p, txq->rd_p); + qcp_rd_p, txq->rd_p, txq->rd_p); if (todo == 0) return todo; @@ -830,13 +980,13 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq) static void nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq) { - unsigned int i; + uint32_t i; if (txq->txbufs == NULL) return; for (i = 0; i < txq->tx_count; i++) { - if (txq->txbufs[i].mbuf) { + if (txq->txbufs[i].mbuf != NULL) { rte_pktmbuf_free_seg(txq->txbufs[i].mbuf); txq->txbufs[i].mbuf = NULL; } @@ -844,11 +994,12 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq) } void -nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx) +nfp_net_tx_queue_release(struct rte_eth_dev *dev, + uint16_t queue_idx) { struct nfp_net_txq *txq = dev->data->tx_queues[queue_idx]; - if (txq) { + if (txq != NULL) { nfp_net_tx_queue_release_mbufs(txq); rte_eth_dma_zone_free(dev, "tx_ring", queue_idx); rte_free(txq->txbufs); @@ -869,8 +1020,8 @@ nfp_net_set_meta_vlan(struct nfp_net_meta_raw *meta_data, struct rte_mbuf *pkt, uint8_t layer) { - uint16_t vlan_tci; uint16_t tpid; + uint16_t vlan_tci; tpid = RTE_ETHER_TYPE_VLAN; vlan_tci = pkt->vlan_tci; @@ -878,6 +1029,36 @@ nfp_net_set_meta_vlan(struct nfp_net_meta_raw *meta_data, meta_data->data[layer] = rte_cpu_to_be_32(tpid << 16 | vlan_tci); } +void +nfp_net_set_meta_ipsec(struct nfp_net_meta_raw *meta_data, + struct nfp_net_txq *txq, + struct rte_mbuf *pkt, + uint8_t layer, + uint8_t ipsec_layer) +{ + int offset; + struct nfp_net_hw *hw; + struct nfp_tx_ipsec_desc_msg *desc_md; + + hw = txq->hw; + offset = hw->ipsec_data->pkt_dynfield_offset; + desc_md = RTE_MBUF_DYNFIELD(pkt, offset, struct nfp_tx_ipsec_desc_msg *); + + switch (ipsec_layer) { + case NFP_IPSEC_META_SAIDX: + meta_data->data[layer] = desc_md->sa_idx; + break; + case NFP_IPSEC_META_SEQLOW: + meta_data->data[layer] = desc_md->esn.low; + break; + case NFP_IPSEC_META_SEQHI: + meta_data->data[layer] = desc_md->esn.hi; + break; + default: + break; + } +} + int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, diff --git a/drivers/net/nfp/nfp_rxtx.h b/drivers/net/nfp/nfp_rxtx.h index 48e548b02db..956cc7a0d2a 100644 --- a/drivers/net/nfp/nfp_rxtx.h +++ b/drivers/net/nfp/nfp_rxtx.h @@ -3,10 +3,10 @@ * All rights reserved. */ -#ifndef _NFP_RXTX_H_ -#define _NFP_RXTX_H_ +#ifndef __NFP_RXTX_H__ +#define __NFP_RXTX_H__ -#include +#include #define NFP_DESC_META_LEN(d) ((d)->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK) @@ -16,85 +16,35 @@ #define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \ ((uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)) -/* Maximum number of supported VLANs in parsed form packet metadata. */ -#define NFP_META_MAX_VLANS 2 /* Maximum number of NFP packet metadata fields. */ #define NFP_META_MAX_FIELDS 8 -/* - * struct nfp_net_meta_raw - Raw memory representation of packet metadata - * - * Describe the raw metadata format, useful when preparing metadata for a - * transmission mbuf. 
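A worked example of the packing done by nfp_net_set_meta_vlan() above, with an arbitrarily chosen TCI (VID 100, PCP 0) used purely for illustration:

static void
nfp_net_set_meta_vlan_example(struct nfp_net_meta_raw *meta_data, uint8_t layer)
{
	uint16_t tpid = RTE_ETHER_TYPE_VLAN;              /* 0x8100 */
	uint16_t vlan_tci = 0x0064;                       /* VID 100, PCP 0 */
	uint32_t word = (uint32_t)tpid << 16 | vlan_tci;  /* 0x81000064 */

	/* Stored big-endian so the firmware sees the TPID in the upper 16 bits. */
	meta_data->data[layer] = rte_cpu_to_be_32(word);
}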
- * - * @header: NFD3 or NFDk field type header (see format in nfp.rst) - * @data: Array of each fields data member - * @length: Keep track of number of valid fields in @header and data. Not part - * of the raw metadata. - */ +/* Describe the raw metadata format. */ struct nfp_net_meta_raw { - uint32_t header; - uint32_t data[NFP_META_MAX_FIELDS]; - uint8_t length; -}; - -/* - * struct nfp_meta_parsed - Record metadata parsed from packet - * - * Parsed NFP packet metadata are recorded in this struct. The content is - * read-only after it have been recorded during parsing by nfp_net_parse_meta(). - * - * @port_id: Port id value - * @hash: RSS hash value - * @hash_type: RSS hash type - * @vlan_layer: The layers of VLAN info which are passed from nic. - * Only this number of entries of the @vlan array are valid. - * - * @vlan: Holds information parses from NFP_NET_META_VLAN. The inner most vlan - * starts at position 0 and only @vlan_layer entries contain valid - * information. - * - * Currently only 2 layers of vlan are supported, - * vlan[0] - vlan strip info - * vlan[1] - qinq strip info - * - * @vlan.offload: Flag indicates whether VLAN is offloaded - * @vlan.tpid: Vlan TPID - * @vlan.tci: Vlan TCI including PCP + Priority + VID - */ -struct nfp_meta_parsed { - uint32_t port_id; - uint32_t hash; - uint8_t hash_type; - uint8_t vlan_layer; - struct { - uint8_t offload; - uint8_t tpid; - uint16_t tci; - } vlan[NFP_META_MAX_VLANS]; + uint32_t header; /**< Field type header (see format in nfp.rst) */ + uint32_t data[NFP_META_MAX_FIELDS]; /**< Array of each fields data member */ + uint8_t length; /**< Number of valid fields in @header */ }; -/* - * The maximum number of descriptors is limited by design as - * DPDK uses uint16_t variables for these values - */ -#define NFP_NET_MAX_TX_DESC (32 * 1024) -#define NFP_NET_MIN_TX_DESC 256 -#define NFP3800_NET_MIN_TX_DESC 512 - -#define NFP_NET_MAX_RX_DESC (32 * 1024) -#define NFP_NET_MIN_RX_DESC 256 -#define NFP3800_NET_MIN_RX_DESC 512 - /* Descriptor alignment */ #define NFP_ALIGN_RING_DESC 128 -#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) - struct nfp_net_dp_buf { struct rte_mbuf *mbuf; }; +struct nfp_tx_ipsec_desc_msg { + uint32_t sa_idx; /**< SA index in driver table */ + uint32_t enc; /**< IPsec enable flag */ + union { + uint64_t value; + struct { + uint32_t low; + uint32_t hi; + }; + } esn; /**< Extended Sequence Number */ +}; + struct nfp_net_txq { /** Backpointer to nfp_net structure */ struct nfp_net_hw *hw; @@ -172,102 +122,6 @@ struct nfp_net_txq { #define PCIE_DESC_RX_L4_CSUM_OK (PCIE_DESC_RX_TCP_CSUM_OK | \ PCIE_DESC_RX_UDP_CSUM_OK) -/* - * The bit format and map of nfp packet type for rxd.offload_info in Rx descriptor. - * - * Bit format about nfp packet type refers to the following: - * --------------------------------- - * 1 0 - * 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | |ol3|tunnel | l3 | l4 | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * - * Bit map about nfp packet type refers to the following: - * - * L4: bit 0~2, used for layer 4 or inner layer 4. - * 000: NFP_NET_PTYPE_L4_NONE - * 001: NFP_NET_PTYPE_L4_TCP - * 010: NFP_NET_PTYPE_L4_UDP - * 011: NFP_NET_PTYPE_L4_FRAG - * 100: NFP_NET_PTYPE_L4_NONFRAG - * 101: NFP_NET_PTYPE_L4_ICMP - * 110: NFP_NET_PTYPE_L4_SCTP - * 111: reserved - * - * L3: bit 3~5, used for layer 3 or inner layer 3. 
- * 000: NFP_NET_PTYPE_L3_NONE - * 001: NFP_NET_PTYPE_L3_IPV6 - * 010: NFP_NET_PTYPE_L3_IPV4 - * 011: NFP_NET_PTYPE_L3_IPV4_EXT - * 100: NFP_NET_PTYPE_L3_IPV6_EXT - * 101: NFP_NET_PTYPE_L3_IPV4_EXT_UNKNOWN - * 110: NFP_NET_PTYPE_L3_IPV6_EXT_UNKNOWN - * 111: reserved - * - * Tunnel: bit 6~9, used for tunnel. - * 0000: NFP_NET_PTYPE_TUNNEL_NONE - * 0001: NFP_NET_PTYPE_TUNNEL_VXLAN - * 0100: NFP_NET_PTYPE_TUNNEL_NVGRE - * 0101: NFP_NET_PTYPE_TUNNEL_GENEVE - * 0010, 0011, 0110~1111: reserved - * - * Outer L3: bit 10~11, used for outer layer 3. - * 00: NFP_NET_PTYPE_OUTER_L3_NONE - * 01: NFP_NET_PTYPE_OUTER_L3_IPV6 - * 10: NFP_NET_PTYPE_OUTER_L3_IPV4 - * 11: reserved - * - * Reserved: bit 10~15, used for extension. - */ - -/* Mask and offset about nfp packet type based on the bit map above. */ -#define NFP_NET_PTYPE_L4_MASK 0x0007 -#define NFP_NET_PTYPE_L3_MASK 0x0038 -#define NFP_NET_PTYPE_TUNNEL_MASK 0x03c0 -#define NFP_NET_PTYPE_OUTER_L3_MASK 0x0c00 - -#define NFP_NET_PTYPE_L4_OFFSET 0 -#define NFP_NET_PTYPE_L3_OFFSET 3 -#define NFP_NET_PTYPE_TUNNEL_OFFSET 6 -#define NFP_NET_PTYPE_OUTER_L3_OFFSET 10 - -/* Case about nfp packet type based on the bit map above. */ -#define NFP_NET_PTYPE_L4_NONE 0 -#define NFP_NET_PTYPE_L4_TCP 1 -#define NFP_NET_PTYPE_L4_UDP 2 -#define NFP_NET_PTYPE_L4_FRAG 3 -#define NFP_NET_PTYPE_L4_NONFRAG 4 -#define NFP_NET_PTYPE_L4_ICMP 5 -#define NFP_NET_PTYPE_L4_SCTP 6 - -#define NFP_NET_PTYPE_L3_NONE 0 -#define NFP_NET_PTYPE_L3_IPV6 1 -#define NFP_NET_PTYPE_L3_IPV4 2 -#define NFP_NET_PTYPE_L3_IPV4_EXT 3 -#define NFP_NET_PTYPE_L3_IPV6_EXT 4 -#define NFP_NET_PTYPE_L3_IPV4_EXT_UNKNOWN 5 -#define NFP_NET_PTYPE_L3_IPV6_EXT_UNKNOWN 6 - -#define NFP_NET_PTYPE_TUNNEL_NONE 0 -#define NFP_NET_PTYPE_TUNNEL_VXLAN 1 -#define NFP_NET_PTYPE_TUNNEL_NVGRE 4 -#define NFP_NET_PTYPE_TUNNEL_GENEVE 5 - -#define NFP_NET_PTYPE_OUTER_L3_NONE 0 -#define NFP_NET_PTYPE_OUTER_L3_IPV6 1 -#define NFP_NET_PTYPE_OUTER_L3_IPV4 2 - -#define NFP_PTYPE2RTE(tunnel, type) ((tunnel) ? RTE_PTYPE_INNER_##type : RTE_PTYPE_##type) - -/* Record NFP packet type parsed from rxd.offload_info. */ -struct nfp_ptype_parsed { - uint8_t l4_ptype; /**< Packet type of layer 4, or inner layer 4. */ - uint8_t l3_ptype; /**< Packet type of layer 3, or inner layer 3. */ - uint8_t tunnel_ptype; /**< Packet type of tunnel. */ - uint8_t outer_l3_ptype; /**< Packet type of outer layer 3. */ -}; - struct nfp_net_rx_desc { union { /** Freelist descriptor. 
*/ @@ -369,44 +223,18 @@ nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq) rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; } -/* set mbuf checksum flags based on RX descriptor flags */ -static inline void -nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd, - struct rte_mbuf *mb) -{ - struct nfp_net_hw *hw = rxq->hw; - - if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM)) - return; - - /* If IPv4 and IP checksum error, fail */ - if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) && - !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK))) - mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; - else - mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; - - /* If neither UDP nor TCP return */ - if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) && - !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM)) - return; - - if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK)) - mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; - else - mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; -} - +void nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd, + struct rte_mbuf *mb); int nfp_net_rx_freelist_setup(struct rte_eth_dev *dev); uint32_t nfp_net_rx_queue_count(void *rx_queue); uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); + uint16_t nb_pkts); void nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx); void nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq); int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, - uint16_t nb_desc, unsigned int socket_id, - const struct rte_eth_rxconf *rx_conf, - struct rte_mempool *mp); + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); void nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx); void nfp_net_reset_tx_queue(struct nfp_net_txq *txq); @@ -415,9 +243,14 @@ int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t nb_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf); -int nfp_net_tx_free_bufs(struct nfp_net_txq *txq); +uint32_t nfp_net_tx_free_bufs(struct nfp_net_txq *txq); void nfp_net_set_meta_vlan(struct nfp_net_meta_raw *meta_data, struct rte_mbuf *pkt, uint8_t layer); +void nfp_net_set_meta_ipsec(struct nfp_net_meta_raw *meta_data, + struct nfp_net_txq *txq, + struct rte_mbuf *pkt, + uint8_t layer, + uint8_t ipsec_layer); -#endif /* _NFP_RXTX_H_ */ +#endif /* __NFP_RXTX_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h index 7750a0218eb..4c9ac017cc2 100644 --- a/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h +++ b/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h @@ -6,21 +6,6 @@ #ifndef __NFP_NFP6000_H__ #define __NFP_NFP6000_H__ -/* CPP Target IDs */ -#define NFP_CPP_TARGET_INVALID 0 -#define NFP_CPP_TARGET_NBI 1 -#define NFP_CPP_TARGET_QDR 2 -#define NFP_CPP_TARGET_ILA 6 -#define NFP_CPP_TARGET_MU 7 -#define NFP_CPP_TARGET_PCIE 9 -#define NFP_CPP_TARGET_ARM 10 -#define NFP_CPP_TARGET_CRYPTO 12 -#define NFP_CPP_TARGET_ISLAND_XPB 14 /* Shared with CAP */ -#define NFP_CPP_TARGET_ISLAND_CAP 14 /* Shared with XPB */ -#define NFP_CPP_TARGET_CT_XPB 14 -#define NFP_CPP_TARGET_LOCAL_SCRATCH 15 -#define NFP_CPP_TARGET_CLS NFP_CPP_TARGET_LOCAL_SCRATCH - #define NFP_ISL_EMEM0 24 #define NFP_MU_ADDR_ACCESS_TYPE_MASK 3ULL @@ -52,9 +37,4 @@ nfp_cppat_mu_locality_lsb(int mode, int addr40) } } -int nfp_target_pushpull(uint32_t cpp_id, uint64_t address); -int nfp_target_cpp(uint32_t cpp_island_id, uint64_t cpp_island_address, - uint32_t *cpp_target_id, uint64_t 
*cpp_target_address, - const uint32_t *imb_table); - #endif /* NFP_NFP6000_H */ diff --git a/drivers/net/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/nfp/nfpcore/nfp6000_pcie.c new file mode 100644 index 00000000000..a6fd89b6c85 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp6000_pcie.c @@ -0,0 +1,1037 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +/* + * nfp_cpp_pcie_ops.c + * Authors: Vinayak Tammineedi + * + * Multiplexes the NFP BARs between NFP internal resources and + * implements the PCIe specific interface for generic CPP bus access. + * + * The BARs are managed and allocated if they are available. + * The generic CPP bus abstraction builds upon this BAR interface. + */ + +#include "nfp6000_pcie.h" + +#include +#include + +#include + +#include "nfp_cpp.h" +#include "nfp_logs.h" +#include "nfp_target.h" +#include "nfp6000/nfp6000.h" +#include "../nfp_logs.h" + +#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0) + +#define NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(_x) (((_x) & 0x1f) << 16) +#define NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS_OF(_x) (((_x) >> 16) & 0x1f) +#define NFP_PCIE_BAR_PCIE2CPP_BASEADDRESS(_x) (((_x) & 0xffff) << 0) +#define NFP_PCIE_BAR_PCIE2CPP_BASEADDRESS_OF(_x) (((_x) >> 0) & 0xffff) +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT(_x) (((_x) & 0x3) << 27) +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_OF(_x) (((_x) >> 27) & 0x3) +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT 0 +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT 1 +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE 3 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE(_x) (((_x) & 0x7) << 29) +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(_x) (((_x) >> 29) & 0x7) +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED 0 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK 1 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_TARGET 2 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL 3 +#define NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(_x) (((_x) & 0xf) << 23) +#define NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS_OF(_x) (((_x) >> 23) & 0xf) +#define NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(_x) (((_x) & 0x3) << 21) +#define NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS_OF(_x) (((_x) >> 21) & 0x3) + +/* + * Minimal size of the PCIe cfg memory we depend on being mapped, + * queue controller and DMA controller don't have to be covered. 
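The NFP_PCIE_BAR_PCIE2CPP_* macros above carve the 32-bit expansion BAR CSR into map-type, target, token, action and length-select fields; nfp_compute_bar() further down does the actual packing. As a rough, hand-written illustration (the target number and CPP offset are made-up values), a bulk-mode 32-bit mapping would be encoded along these lines:

static uint32_t
nfp_barcfg_sketch(void)
{
	uint64_t cpp_offset = 0x20000000;  /* example CPP address, illustration only */
	uint32_t cfg;

	cfg  = NFP_PCIE_BAR_PCIE2CPP_MAPTYPE(NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK);
	cfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(7);  /* e.g. CPP target 7 (MU) */
	cfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(0);
	cfg |= NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT(NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT);
	cfg |= (uint32_t)(cpp_offset >> (40 - 21));  /* upper CPP address bits become the BAR base */

	return cfg;
}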
+ */ +#define NFP_PCI_MIN_MAP_SIZE 0x080000 /* 512K */ + +#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize) +#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize) +#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2)) +#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4)) +#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4)) + +#define NFP_PCIE_P2C_EXPBAR_OFFSET(bar_index) ((bar_index) * 4) + +struct nfp_pcie_user; +struct nfp6000_area_priv; + +/* Describes BAR configuration and usage */ +struct nfp_bar { + struct nfp_pcie_user *nfp; /**< Backlink to owner */ + uint32_t barcfg; /**< BAR config CSR */ + uint64_t base; /**< Base CPP offset */ + uint64_t mask; /**< Mask of the BAR aperture (read only) */ + uint32_t bitsize; /**< Bit size of the BAR aperture (read only) */ + uint32_t index; /**< Index of the BAR */ + bool lock; /**< If the BAR has been locked */ + + char *iomem; /**< mapped IO memory */ + struct rte_mem_resource *resource; /**< IOMEM resource window */ +}; + +#define NFP_PCI_BAR_MAX (PCI_64BIT_BAR_COUNT * 8) + +struct nfp_pcie_user { + struct rte_pci_device *pci_dev; + const struct nfp_dev_info *dev_info; + + int lock; + + /* PCI BAR management */ + uint32_t bars; + struct nfp_bar bar[NFP_PCI_BAR_MAX]; + + /* Reserved BAR access */ + char *csr; +}; + +/* Generic CPP bus access interface. */ +struct nfp6000_area_priv { + struct nfp_bar *bar; + uint32_t bar_offset; + + int target; + int action; + int token; + uint64_t offset; + struct { + int read; + int write; + int bar; + } width; + size_t size; + char *iomem; +}; + +static uint32_t +nfp_bar_maptype(struct nfp_bar *bar) +{ + return NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(bar->barcfg); +} + +#define TARGET_WIDTH_32 4 +#define TARGET_WIDTH_64 8 + +static int +nfp_compute_bar(const struct nfp_bar *bar, + uint32_t *bar_config, + uint64_t *bar_base, + int target, + int action, + int token, + uint64_t offset, + size_t size, + int width) +{ + uint64_t mask; + uint32_t newcfg; + uint32_t bitsize; + + if (target >= NFP_CPP_NUM_TARGETS) + return -EINVAL; + + switch (width) { + case 8: + newcfg = NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT + (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT); + break; + case 4: + newcfg = NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT + (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT); + break; + case 0: + newcfg = NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT + (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE); + break; + default: + return -EINVAL; + } + + if (action != NFP_CPP_ACTION_RW && action != 0) { + /* Fixed CPP mapping with specific action */ + mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1); + + newcfg |= NFP_PCIE_BAR_PCIE2CPP_MAPTYPE + (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(target); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(action); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(token); + + if ((offset & mask) != ((offset + size - 1) & mask)) + return -EINVAL; + + offset &= mask; + bitsize = 40 - 16; + } else { + mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1); + + /* Bulk mapping */ + newcfg |= NFP_PCIE_BAR_PCIE2CPP_MAPTYPE + (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(target); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(token); + + if ((offset & mask) != ((offset + size - 1) & mask)) + return -EINVAL; + + offset &= mask; + bitsize = 40 - 21; + } + newcfg |= offset >> bitsize; + + if (bar_base != NULL) + *bar_base = offset; + + if (bar_config != NULL) + 
*bar_config = newcfg; + + return 0; +} + +static int +nfp_bar_write(struct nfp_pcie_user *nfp, + struct nfp_bar *bar, + uint32_t newcfg) +{ + uint32_t xbar; + + xbar = NFP_PCIE_P2C_EXPBAR_OFFSET(bar->index); + + if (nfp->csr != NULL) { + rte_write32(newcfg, nfp->csr + xbar); + /* Readback to ensure BAR is flushed */ + rte_read32(nfp->csr + xbar); + } else { + xbar += nfp->dev_info->pcie_cfg_expbar_offset; + if (rte_pci_write_config(nfp->pci_dev, &newcfg, sizeof(uint32_t), + xbar) < 0) + return -EIO; + } + + bar->barcfg = newcfg; + + return 0; +} + +static int +nfp_reconfigure_bar(struct nfp_pcie_user *nfp, + struct nfp_bar *bar, + int target, + int action, + int token, + uint64_t offset, + size_t size, + int width) +{ + int err; + uint32_t newcfg; + uint64_t newbase; + + err = nfp_compute_bar(bar, &newcfg, &newbase, target, action, + token, offset, size, width); + if (err != 0) + return err; + + bar->base = newbase; + + return nfp_bar_write(nfp, bar, newcfg); +} + +static uint32_t +nfp_bitsize_calc(uint64_t mask) +{ + uint64_t tmp = mask; + uint32_t bit_size = 0; + + if (tmp == 0) + return 0; + + for (; tmp != 0; tmp >>= 1) + bit_size++; + + return bit_size; +} + +static int +nfp_cmp_bars(const void *ptr_a, + const void *ptr_b) +{ + const struct nfp_bar *a = ptr_a; + const struct nfp_bar *b = ptr_b; + + if (a->bitsize == b->bitsize) + return a->index - b->index; + else + return a->bitsize - b->bitsize; +} + +static bool +nfp_bars_for_secondary(uint32_t index) +{ + uint8_t tmp = index & 0x07; + + if (tmp == 0x06 || tmp == 0x07) + return true; + else + return false; +} + +/** + * Map all PCI bars and fetch the actual BAR configurations from the board. + * We assume that the BAR with the PCIe config block is already mapped. + * + * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM) + * BAR0.1: -- + * BAR0.2: -- + * BAR0.3: -- + * BAR0.4: -- + * BAR0.5: -- + * BAR0.6: -- + * BAR0.7: -- + * + * BAR1.0-BAR1.7: -- + * BAR2.0-BAR2.7: -- + */ +static int +nfp_enable_bars(struct nfp_pcie_user *nfp) +{ + int pf; + uint32_t i; + uint8_t min_bars; + struct nfp_bar *bar; + enum rte_proc_type_t type; + struct rte_mem_resource *res; + const uint32_t barcfg_msix_general = NFP_PCIE_BAR_PCIE2CPP_MAPTYPE + (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL) | + NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT; + + type = rte_eal_process_type(); + if (type == RTE_PROC_PRIMARY) + min_bars = 12; + else + min_bars = 4; + + for (i = 0; i < RTE_DIM(nfp->bar); i++) { + if (i != 0) { + if (type == RTE_PROC_PRIMARY) { + if (nfp_bars_for_secondary(i)) + continue; + } else { + if (!nfp_bars_for_secondary(i)) + continue; + } + } + + /* 24 NFP bars mapping into BAR0, BAR2 and BAR4 */ + res = &nfp->pci_dev->mem_resource[(i >> 3) * 2]; + + /* Skip over BARs that are not mapped */ + if (res->addr != NULL) { + bar = &nfp->bar[i]; + bar->resource = res; + bar->barcfg = 0; + + bar->nfp = nfp; + bar->index = i; + /* The resource shared by 8 bars */ + bar->mask = (res->len >> 3) - 1; + bar->bitsize = nfp_bitsize_calc(bar->mask); + bar->base = 0; + bar->lock = false; + bar->iomem = (char *)res->addr + + ((bar->index & 7) << bar->bitsize); + + nfp->bars++; + } + } + + if (nfp->bars < min_bars) { + PMD_DRV_LOG(ERR, "Not enough usable BARs found."); + return -EINVAL; + } + + switch (nfp->pci_dev->id.device_id) { + case PCI_DEVICE_ID_NFP3800_PF_NIC: + pf = nfp->pci_dev->addr.function & 0x07; + nfp->csr = nfp->bar[0].iomem + NFP_PCIE_BAR(pf); + break; + case PCI_DEVICE_ID_NFP4000_PF_NIC: + case PCI_DEVICE_ID_NFP6000_PF_NIC: + nfp->csr = 
nfp->bar[0].iomem + NFP_PCIE_BAR(0); + break; + default: + PMD_DRV_LOG(ERR, "Unsupported device ID: %04hx!", + nfp->pci_dev->id.device_id); + return -EINVAL; + } + + /* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */ + bar = &nfp->bar[0]; + bar->lock = true; + + if (nfp_bar_write(nfp, bar, barcfg_msix_general) < 0) + return -EIO; + + /* Sort bars by bit size - use the smallest possible first. */ + qsort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]), nfp_cmp_bars); + + return 0; +} + +/* Check if BAR can be used with the given parameters. */ +static bool +matching_bar_exist(struct nfp_bar *bar, + int target, + int action, + int token, + uint64_t offset, + size_t size, + int width) +{ + int bar_width; + int bar_token; + int bar_target; + int bar_action; + uint32_t map_type; + + bar_width = NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_OF(bar->barcfg); + switch (bar_width) { + case NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT: + bar_width = 4; + break; + case NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT: + bar_width = 8; + break; + case NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE: + bar_width = 0; + break; + default: + bar_width = -1; + break; + } + + /* Make sure to match up the width */ + if (bar_width != width) + return false; + + bar_token = NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS_OF(bar->barcfg); + bar_action = NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS_OF(bar->barcfg); + map_type = NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(bar->barcfg); + switch (map_type) { + case NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_TARGET: + bar_token = -1; + /* FALLTHROUGH */ + case NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK: + bar_action = NFP_CPP_ACTION_RW; + if (action == 0) + action = NFP_CPP_ACTION_RW; + /* FALLTHROUGH */ + case NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED: + break; + default: + /* We don't match explicit bars through the area interface */ + return false; + } + + bar_target = NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS_OF(bar->barcfg); + if ((bar_target < 0 || bar_target == target) && + (bar_token < 0 || bar_token == token) && + bar_action == action && + bar->base <= offset && + (bar->base + (1 << bar->bitsize)) >= (offset + size)) + return true; + + /* No match */ + return false; +} + +static int +find_matching_bar(struct nfp_pcie_user *nfp, + int target, + int action, + int token, + uint64_t offset, + size_t size, + int width) +{ + uint32_t n; + + for (n = 0; n < nfp->bars; n++) { + struct nfp_bar *bar = &nfp->bar[n]; + + if (bar->lock) + continue; + + if (matching_bar_exist(bar, target, action, token, + offset, size, width)) + return n; + } + + return -1; +} + +/* Return EAGAIN if no resource is available */ +static int +find_unused_bar_noblock(struct nfp_pcie_user *nfp, + int target, + int action, + int token, + uint64_t offset, + size_t size, + int width) +{ + int ret; + uint32_t n; + const struct nfp_bar *bar; + + for (n = 0; n < nfp->bars; n++) { + bar = &nfp->bar[n]; + + if (bar->bitsize == 0) + continue; + + /* Just check to see if we can make it fit... 
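A quick worked example of the sub-BAR sizing in nfp_enable_bars() above (the 16 MB resource length is an assumption chosen only to make the arithmetic concrete):

/* One PCI resource is shared by 8 expansion BARs. */
uint64_t res_len = 16 * 1024 * 1024;        /* illustrative resource length */
uint64_t mask = (res_len >> 3) - 1;         /* 0x1fffff */
uint32_t bitsize = nfp_bitsize_calc(mask);  /* 21, i.e. each window is 1 << 21 = 2 MB */
/* Expansion BAR index 3 in that resource is then mapped at res->addr + (3 << 21). */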
*/ + ret = nfp_compute_bar(bar, NULL, NULL, target, action, + token, offset, size, width); + if (ret != 0) + continue; + + if (!bar->lock) + return n; + } + + return -EAGAIN; +} + +static int +nfp_alloc_bar(struct nfp_pcie_user *nfp, + struct nfp6000_area_priv *priv) +{ + int ret; + int bar_num; + size_t size = priv->size; + int token = priv->token; + int target = priv->target; + int action = priv->action; + int width = priv->width.bar; + uint64_t offset = priv->offset; + + /* Bar size should small than 16MB */ + if (size > (1 << 24)) + return -EINVAL; + + bar_num = find_matching_bar(nfp, target, action, token, + offset, size, width); + if (bar_num >= 0) { + /* Found a perfect match. */ + nfp->bar[bar_num].lock = true; + return bar_num; + } + + bar_num = find_unused_bar_noblock(nfp, target, action, token, + offset, size, width); + if (bar_num < 0) + return bar_num; + + nfp->bar[bar_num].lock = true; + ret = nfp_reconfigure_bar(nfp, &nfp->bar[bar_num], + target, action, token, offset, size, width); + if (ret < 0) { + nfp->bar[bar_num].lock = false; + return ret; + } + + return bar_num; +} + +static void +nfp_disable_bars(struct nfp_pcie_user *nfp) +{ + uint32_t i; + struct nfp_bar *bar; + + for (i = 0; i < nfp->bars; i++) { + bar = &nfp->bar[i]; + if (bar->iomem != NULL) { + bar->iomem = NULL; + bar->lock = false; + } + } +} + +static int +nfp6000_area_init(struct nfp_cpp_area *area, + uint32_t dest, + uint64_t address, + size_t size) +{ + int pp; + int ret = 0; + uint32_t token = NFP_CPP_ID_TOKEN_of(dest); + uint32_t target = NFP_CPP_ID_TARGET_of(dest); + uint32_t action = NFP_CPP_ID_ACTION_of(dest); + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), address); + if (pp < 0) + return pp; + + priv->width.read = PUSH_WIDTH(pp); + priv->width.write = PULL_WIDTH(pp); + + if (priv->width.read > 0 && + priv->width.write > 0 && + priv->width.read != priv->width.write) + return -EINVAL; + + if (priv->width.read > 0) + priv->width.bar = priv->width.read; + else + priv->width.bar = priv->width.write; + + priv->bar = NULL; + + priv->target = target; + priv->action = action; + priv->token = token; + priv->offset = address; + priv->size = size; + + return ret; +} + +static int +nfp6000_area_acquire(struct nfp_cpp_area *area) +{ + int bar_num; + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + struct nfp_pcie_user *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area)); + + /* Already allocated. */ + if (priv->bar != NULL) + return 0; + + bar_num = nfp_alloc_bar(nfp, priv); + if (bar_num < 0) { + PMD_DRV_LOG(ERR, "Failed to allocate bar %d:%d:%d:%#lx: %d", + priv->target, priv->action, priv->token, + priv->offset, bar_num); + return bar_num; + } + + priv->bar = &nfp->bar[bar_num]; + + /* Calculate offset into BAR. */ + if (nfp_bar_maptype(priv->bar) == + NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL) { + priv->bar_offset = priv->offset & + (NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1); + priv->bar_offset += NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(priv->bar, + priv->target); + priv->bar_offset += NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(priv->bar, + priv->token); + } else { + priv->bar_offset = priv->offset & priv->bar->mask; + } + + /* Must have been too big. Sub-allocate. 
*/ + if (priv->bar->iomem == NULL) + return -ENOMEM; + + priv->iomem = priv->bar->iomem + priv->bar_offset; + + return 0; +} + +static void +nfp6000_area_release(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + priv->bar->lock = false; + priv->bar = NULL; + priv->iomem = NULL; +} + +static void * +nfp6000_area_iomem(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + return priv->iomem; +} + +static int +nfp6000_area_read(struct nfp_cpp_area *area, + void *address, + uint32_t offset, + size_t length) +{ + int ret; + size_t n; + int width; + uint32_t *wrptr32 = address; + uint64_t *wrptr64 = address; + struct nfp6000_area_priv *priv; + const volatile uint32_t *rdptr32; + const volatile uint64_t *rdptr64; + + priv = nfp_cpp_area_priv(area); + rdptr64 = (uint64_t *)(priv->iomem + offset); + rdptr32 = (uint32_t *)(priv->iomem + offset); + + if (offset + length > priv->size) + return -EFAULT; + + width = priv->width.read; + if (width <= 0) + return -EINVAL; + + /* MU reads via a PCIe2CPP BAR support 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_MU & NFP_CPP_TARGET_ID_MASK) && + priv->action == NFP_CPP_ACTION_RW && + (offset % sizeof(uint64_t) == 4 || + length % sizeof(uint64_t) == 4)) + width = TARGET_WIDTH_32; + + /* Unaligned? Translate to an explicit access */ + if (((priv->offset + offset) & (width - 1)) != 0) { + PMD_DRV_LOG(ERR, "aread_read unaligned!!!"); + return -EINVAL; + } + + if (priv->bar == NULL) + return -EFAULT; + + switch (width) { + case TARGET_WIDTH_32: + if (offset % sizeof(uint32_t) != 0 || + length % sizeof(uint32_t) != 0) + return -EINVAL; + + for (n = 0; n < length; n += sizeof(uint32_t)) { + *wrptr32 = *rdptr32; + wrptr32++; + rdptr32++; + } + + ret = n; + break; + case TARGET_WIDTH_64: + if (offset % sizeof(uint64_t) != 0 || + length % sizeof(uint64_t) != 0) + return -EINVAL; + + for (n = 0; n < length; n += sizeof(uint64_t)) { + *wrptr64 = *rdptr64; + wrptr64++; + rdptr64++; + } + + ret = n; + break; + default: + return -EINVAL; + } + + return ret; +} + +static int +nfp6000_area_write(struct nfp_cpp_area *area, + const void *address, + uint32_t offset, + size_t length) +{ + int ret; + size_t n; + int width; + uint32_t *wrptr32; + uint64_t *wrptr64; + struct nfp6000_area_priv *priv; + const uint32_t *rdptr32 = address; + const uint64_t *rdptr64 = address; + + priv = nfp_cpp_area_priv(area); + wrptr64 = (uint64_t *)(priv->iomem + offset); + wrptr32 = (uint32_t *)(priv->iomem + offset); + + if (offset + length > priv->size) + return -EFAULT; + + width = priv->width.write; + if (width <= 0) + return -EINVAL; + + /* MU reads via a PCIe2CPP BAR support 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_MU & NFP_CPP_TARGET_ID_MASK) && + priv->action == NFP_CPP_ACTION_RW && + (offset % sizeof(uint64_t) == 4 || + length % sizeof(uint64_t) == 4)) + width = TARGET_WIDTH_32; + + /* Unaligned? 
Translate to an explicit access */ + if (((priv->offset + offset) & (width - 1)) != 0) + return -EINVAL; + + if (priv->bar == NULL) + return -EFAULT; + + switch (width) { + case TARGET_WIDTH_32: + if (offset % sizeof(uint32_t) != 0 || + length % sizeof(uint32_t) != 0) + return -EINVAL; + + for (n = 0; n < length; n += sizeof(uint32_t)) { + *wrptr32 = *rdptr32; + wrptr32++; + rdptr32++; + } + + ret = n; + break; + case TARGET_WIDTH_64: + if (offset % sizeof(uint64_t) != 0 || + length % sizeof(uint64_t) != 0) + return -EINVAL; + + for (n = 0; n < length; n += sizeof(uint64_t)) { + *wrptr64 = *rdptr64; + wrptr64++; + rdptr64++; + } + + ret = n; + break; + default: + return -EINVAL; + } + + return ret; +} + +static int +nfp_acquire_process_lock(struct nfp_pcie_user *desc) +{ + int rc; + struct flock lock; + char lockname[30]; + + memset(&lock, 0, sizeof(lock)); + + snprintf(lockname, sizeof(lockname), "/var/lock/nfp_%s", + desc->pci_dev->device.name); + desc->lock = open(lockname, O_RDWR | O_CREAT, 0666); + if (desc->lock < 0) + return desc->lock; + + lock.l_type = F_WRLCK; + lock.l_whence = SEEK_SET; + rc = -1; + while (rc != 0) { + rc = fcntl(desc->lock, F_SETLKW, &lock); + if (rc < 0) { + if (errno != EAGAIN && errno != EACCES) { + close(desc->lock); + return rc; + } + } + } + + return 0; +} + +static int +nfp6000_get_dsn(struct rte_pci_device *pci_dev, + uint64_t *dsn) +{ + off_t pos; + size_t len; + uint64_t tmp = 0; + + pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN); + if (pos <= 0) { + PMD_DRV_LOG(ERR, "PCI_EXT_CAP_ID_DSN not found"); + return -ENODEV; + } + + pos += 4; + len = sizeof(tmp); + + if (rte_pci_read_config(pci_dev, &tmp, len, pos) < 0) { + PMD_DRV_LOG(ERR, "nfp get device serial number failed"); + return -ENOENT; + } + + *dsn = tmp; + + return 0; +} + +static int +nfp6000_get_interface(struct rte_pci_device *dev, + uint16_t *interface) +{ + int ret; + uint64_t dsn = 0; + + ret = nfp6000_get_dsn(dev, &dsn); + if (ret != 0) + return ret; + + *interface = dsn & 0xffff; + + return 0; +} + +static int +nfp6000_get_serial(struct rte_pci_device *dev, + uint8_t *serial, + size_t length) +{ + int ret; + uint64_t dsn = 0; + + if (length < NFP_SERIAL_LEN) + return -ENOMEM; + + ret = nfp6000_get_dsn(dev, &dsn); + if (ret != 0) + return ret; + + serial[0] = (dsn >> 56) & 0xff; + serial[1] = (dsn >> 48) & 0xff; + serial[2] = (dsn >> 40) & 0xff; + serial[3] = (dsn >> 32) & 0xff; + serial[4] = (dsn >> 24) & 0xff; + serial[5] = (dsn >> 16) & 0xff; + + return 0; +} + +static int +nfp6000_init(struct nfp_cpp *cpp) +{ + int ret = 0; + struct nfp_pcie_user *desc = nfp_cpp_priv(cpp); + + if (rte_eal_process_type() == RTE_PROC_PRIMARY && + nfp_cpp_driver_need_lock(cpp)) { + ret = nfp_acquire_process_lock(desc); + if (ret != 0) + return -1; + } + + ret = nfp_enable_bars(desc); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Enable bars failed"); + return -1; + } + + return 0; +} + +static void +nfp6000_free(struct nfp_cpp *cpp) +{ + struct nfp_pcie_user *desc = nfp_cpp_priv(cpp); + + nfp_disable_bars(desc); + if (nfp_cpp_driver_need_lock(cpp)) + close(desc->lock); + free(desc); +} + +static const struct nfp_cpp_operations nfp6000_pcie_ops = { + .init = nfp6000_init, + .free = nfp6000_free, + + .area_priv_size = sizeof(struct nfp6000_area_priv), + + .get_interface = nfp6000_get_interface, + .get_serial = nfp6000_get_serial, + + .area_init = nfp6000_area_init, + .area_acquire = nfp6000_area_acquire, + .area_release = nfp6000_area_release, + .area_read = nfp6000_area_read, + .area_write = 
nfp6000_area_write, + .area_iomem = nfp6000_area_iomem, +}; + +const struct +nfp_cpp_operations *nfp_cpp_transport_operations(void) +{ + return &nfp6000_pcie_ops; +} + +/** + * Build a NFP CPP bus from a NFP6000 PCI device + * + * @param pci_dev + * NFP6000 PCI device + * @param dev_info + * NFP device information + * @param driver_lock_needed + * Driver lock flag + * + * @return + * NFP CPP handle or NULL + */ +struct nfp_cpp * +nfp_cpp_from_nfp6000_pcie(struct rte_pci_device *pci_dev, + const struct nfp_dev_info *dev_info, + bool driver_lock_needed) +{ + int ret; + struct nfp_cpp *cpp; + uint16_t interface = 0; + struct nfp_pcie_user *nfp; + + nfp = malloc(sizeof(*nfp)); + if (nfp == NULL) + return NULL; + + memset(nfp, 0, sizeof(*nfp)); + nfp->pci_dev = pci_dev; + nfp->dev_info = dev_info; + + ret = nfp6000_get_interface(pci_dev, &interface); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Get interface failed."); + free(nfp); + return NULL; + } + + if (NFP_CPP_INTERFACE_TYPE_of(interface) != NFP_CPP_INTERFACE_TYPE_PCI) { + PMD_DRV_LOG(ERR, "Interface type is not right."); + free(nfp); + return NULL; + } + + if (NFP_CPP_INTERFACE_CHANNEL_of(interface) != + NFP_CPP_INTERFACE_CHANNEL_PEROPENER) { + PMD_DRV_LOG(ERR, "Interface channel is not right"); + free(nfp); + return NULL; + } + + /* Probe for all the common NFP devices */ + cpp = nfp_cpp_from_device_name(pci_dev, nfp, driver_lock_needed); + if (cpp == NULL) { + PMD_DRV_LOG(ERR, "Get cpp from operation failed"); + free(nfp); + return NULL; + } + + return cpp; +} diff --git a/drivers/net/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/nfp/nfpcore/nfp6000_pcie.h new file mode 100644 index 00000000000..8e2cfb69e68 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp6000_pcie.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP6000_PCIE_H__ +#define __NFP6000_PCIE_H__ + +#include + +#include "nfp_cpp.h" +#include "nfp_dev.h" + +const struct nfp_cpp_operations *nfp_cpp_transport_operations(void); + +struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct rte_pci_device *pci_dev, + const struct nfp_dev_info *dev_info, + bool driver_lock_needed); + +#endif /* __NFP6000_PCIE_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp_cpp.h b/drivers/net/nfp/nfpcore/nfp_cpp.h index 8f87c093275..2defc4fa16d 100644 --- a/drivers/net/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/nfp/nfpcore/nfp_cpp.h @@ -8,45 +8,17 @@ #include -struct nfp_cpp_mutex; +/* NFP CPP handle */ +struct nfp_cpp; -/* - * NFP CPP handle - */ -struct nfp_cpp { - uint32_t model; - uint32_t interface; - uint8_t *serial; - int serial_len; - void *priv; +/* NFP CPP device area handle */ +struct nfp_cpp_area; - /* Mutex cache */ - struct nfp_cpp_mutex *mutex_cache; - const struct nfp_cpp_operations *op; +#define NFP_SERIAL_LEN 6 - /* - * NFP-6xxx originating island IMB CPP Address Translation. CPP Target - * ID is index into array. Values are obtained at runtime from local - * island XPB CSRs. - */ - uint32_t imb_cat_table[16]; +#define NFP_CPP_NUM_TARGETS 16 - /* MU access type bit offset */ - uint32_t mu_locality_lsb; - - int driver_lock_needed; -}; - -/* - * NFP CPP device area handle - */ -struct nfp_cpp_area { - struct nfp_cpp *cpp; - char *name; - unsigned long long offset; - unsigned long size; - /* Here follows the 'priv' part of nfp_cpp_area.
*/ -}; +#define PCI_64BIT_BAR_COUNT 3 /* * NFP CPP operations structure @@ -56,7 +28,7 @@ struct nfp_cpp_operations { size_t area_priv_size; /* Instance an NFP CPP */ - int (*init)(struct nfp_cpp *cpp, struct rte_pci_device *dev); + int (*init)(struct nfp_cpp *cpp); /* * Free the bus. @@ -64,14 +36,21 @@ struct nfp_cpp_operations { */ void (*free)(struct nfp_cpp *cpp); + int (*get_interface)(struct rte_pci_device *dev, + uint16_t *interface); + + int (*get_serial)(struct rte_pci_device *dev, + uint8_t *serial, + size_t length); + /* * Initialize a new NFP CPP area * NOTE: This is _not_ serialized */ int (*area_init)(struct nfp_cpp_area *area, - uint32_t dest, - unsigned long long address, - unsigned long size); + uint32_t dest, + uint64_t address, + size_t size); /* * Clean up a NFP CPP area before it is freed * NOTE: This is _not_ serialized @@ -83,35 +62,35 @@ struct nfp_cpp_operations { * Serialized */ int (*area_acquire)(struct nfp_cpp_area *area); + /* * Release resources for a NFP CPP area * Serialized */ void (*area_release)(struct nfp_cpp_area *area); + /* * Return a void IO pointer to a NFP CPP area * NOTE: This is _not_ serialized */ - void *(*area_iomem)(struct nfp_cpp_area *area); - void *(*area_mapped)(struct nfp_cpp_area *area); /* * Perform a read from a NFP CPP area * Serialized */ int (*area_read)(struct nfp_cpp_area *area, - void *kernel_vaddr, - unsigned long offset, - unsigned int length); + void *kernel_vaddr, + uint32_t offset, + size_t length); /* * Perform a write to a NFP CPP area * Serialized */ int (*area_write)(struct nfp_cpp_area *area, - const void *kernel_vaddr, - unsigned long offset, - unsigned int length); + const void *kernel_vaddr, + uint32_t offset, + size_t length); }; /* @@ -127,35 +106,45 @@ struct nfp_cpp_operations { #define NFP_CPP_TARGET_ID_MASK 0x1f -/* +/** * Pack target, token, and action into a CPP ID. * * Create a 32-bit CPP identifier representing the access to be made. * These identifiers are used as parameters to other NFP CPP functions. * Some CPP devices may allow wildcard identifiers to be specified. * - * @target NFP CPP target id - * @action NFP CPP action id - * @token NFP CPP token id + * @param target + * NFP CPP target id + * @param action + * NFP CPP action id + * @param token + * NFP CPP token id * - * @return NFP CPP ID + * @return + * NFP CPP ID */ #define NFP_CPP_ID(target, action, token) \ ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ (((action) & 0xff) << 8)) -/* +/** * Pack target, token, action, and island into a CPP ID. - * @target NFP CPP target id - * @action NFP CPP action id - * @token NFP CPP token id - * @island NFP CPP island id * * Create a 32-bit CPP identifier representing the access to be made. * These identifiers are used as parameters to other NFP CPP functions. * Some CPP devices may allow wildcard identifiers to be specified. 
* - * @return NFP CPP ID + * @param target + * NFP CPP target id + * @param action + * NFP CPP action id + * @param token + * NFP CPP token id + * @param island + * NFP CPP island id + * + * @return + * NFP CPP ID */ #define NFP_CPP_ISLAND_ID(target, action, token, island) \ ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ @@ -163,9 +152,12 @@ struct nfp_cpp_operations { /** * Return the NFP CPP target of a NFP CPP ID - * @id NFP CPP ID * - * @return NFP CPP target + * @param id + * NFP CPP ID + * + * @return + * NFP CPP target */ static inline uint8_t NFP_CPP_ID_TARGET_of(uint32_t id) @@ -173,11 +165,14 @@ NFP_CPP_ID_TARGET_of(uint32_t id) return (id >> 24) & NFP_CPP_TARGET_ID_MASK; } -/* +/** * Return the NFP CPP token of a NFP CPP ID - * @id NFP CPP ID * - * @return NFP CPP token + * @param id + * NFP CPP ID + * + * @return + * NFP CPP token */ static inline uint8_t NFP_CPP_ID_TOKEN_of(uint32_t id) @@ -185,11 +180,14 @@ NFP_CPP_ID_TOKEN_of(uint32_t id) return (id >> 16) & 0xff; } -/* +/** * Return the NFP CPP action of a NFP CPP ID - * @id NFP CPP ID * - * @return NFP CPP action + * @param id + * NFP CPP ID + * + * @return + * NFP CPP action */ static inline uint8_t NFP_CPP_ID_ACTION_of(uint32_t id) @@ -197,11 +195,14 @@ NFP_CPP_ID_ACTION_of(uint32_t id) return (id >> 8) & 0xff; } -/* +/** * Return the NFP CPP island of a NFP CPP ID - * @id NFP CPP ID * - * @return NFP CPP island + * @param id + * NFP CPP ID + * + * @return + * NFP CPP island */ static inline uint8_t NFP_CPP_ID_ISLAND_of(uint32_t id) @@ -209,128 +210,42 @@ NFP_CPP_ID_ISLAND_of(uint32_t id) return id & 0xff; } -/* - * This should be the only external function the transport - * module supplies - */ -const struct nfp_cpp_operations *nfp_cpp_transport_operations(void); - -/* - * Set the model id - * - * @param cpp NFP CPP operations structure - * @param model Model ID - */ void nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model); -/* - * Set the private instance owned data of a nfp_cpp struct - * - * @param cpp NFP CPP operations structure - * @param interface Interface ID - */ void nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface); -/* - * Set the private instance owned data of a nfp_cpp struct - * - * @param cpp NFP CPP operations structure - * @param serial NFP serial byte array - * @param len Length of the serial byte array - */ -int nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial, - size_t serial_len); +void nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial, + size_t serial_len); -/* - * Set the private data of the nfp_cpp instance - * - * @param cpp NFP CPP operations structure - * @return Opaque device pointer - */ void nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv); -/* - * Return the private data of the nfp_cpp instance - * - * @param cpp NFP CPP operations structure - * @return Opaque device pointer - */ void *nfp_cpp_priv(struct nfp_cpp *cpp); -/* - * Get the privately allocated portion of a NFP CPP area handle - * - * @param cpp_area NFP CPP area handle - * @return Pointer to the private area, or NULL on failure - */ void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area); -uint32_t __nfp_cpp_model_autodetect(struct nfp_cpp *cpp, uint32_t *model); +uint32_t nfp_cpp_model_autodetect(struct nfp_cpp *cpp, uint32_t *model); -/* - * NFP CPP core interface for CPP clients. - */ - -/* - * Open a NFP CPP handle to a CPP device - * - * @param[in] id 0-based ID for the CPP interface to use - * - * @return NFP CPP handle, or NULL on failure. 
- */ +/* NFP CPP core interface for CPP clients */ struct nfp_cpp *nfp_cpp_from_device_name(struct rte_pci_device *dev, - int driver_lock_needed); + void *priv, bool driver_lock_needed); -/* - * Free a NFP CPP handle - * - * @param[in] cpp NFP CPP handle - */ void nfp_cpp_free(struct nfp_cpp *cpp); #define NFP_CPP_MODEL_INVALID 0xffffffff -/* - * NFP_CPP_MODEL_CHIP_of - retrieve the chip ID from the model ID - * - * The chip ID is a 16-bit BCD+A-F encoding for the chip type. - * - * @param[in] model NFP CPP model id - * @return NFP CPP chip id - */ -#define NFP_CPP_MODEL_CHIP_of(model) (((model) >> 16) & 0xffff) - -/* - * NFP_CPP_MODEL_IS_6000 - Check for the NFP6000 family of devices - * - * NOTE: The NFP4000 series is considered as a NFP6000 series variant. - * - * @param[in] model NFP CPP model id - * @return true if model is in the NFP6000 family, false otherwise. - */ -#define NFP_CPP_MODEL_IS_6000(model) \ - ((NFP_CPP_MODEL_CHIP_of(model) >= 0x3800) && \ - (NFP_CPP_MODEL_CHIP_of(model) < 0x7000)) - -/* - * nfp_cpp_model - Retrieve the Model ID of the NFP - * - * @param[in] cpp NFP CPP handle - * @return NFP CPP Model ID - */ uint32_t nfp_cpp_model(struct nfp_cpp *cpp); /* * NFP Interface types - logical interface for this CPP connection 4 bits are * reserved for interface type. */ -#define NFP_CPP_INTERFACE_TYPE_INVALID 0x0 -#define NFP_CPP_INTERFACE_TYPE_PCI 0x1 -#define NFP_CPP_INTERFACE_TYPE_ARM 0x2 -#define NFP_CPP_INTERFACE_TYPE_RPC 0x3 -#define NFP_CPP_INTERFACE_TYPE_ILA 0x4 +#define NFP_CPP_INTERFACE_TYPE_INVALID 0x0 +#define NFP_CPP_INTERFACE_TYPE_PCI 0x1 +#define NFP_CPP_INTERFACE_TYPE_ARM 0x2 +#define NFP_CPP_INTERFACE_TYPE_RPC 0x3 +#define NFP_CPP_INTERFACE_TYPE_ILA 0x4 -/* +/** * Construct a 16-bit NFP Interface ID * * Interface IDs consists of 4 bits of interface type, 4 bits of unit @@ -340,538 +255,133 @@ uint32_t nfp_cpp_model(struct nfp_cpp *cpp); * which use the MU Atomic CompareAndWrite operation - hence the limit to 16 * bits to be able to use the NFP Interface ID as a lock owner. 
* - * @param[in] type NFP Interface Type - * @param[in] unit Unit identifier for the interface type - * @param[in] channel Channel identifier for the interface unit - * @return Interface ID + * @param type + * NFP Interface Type + * @param unit + * Unit identifier for the interface type + * @param channel + * Channel identifier for the interface unit + * + * @return + * Interface ID */ -#define NFP_CPP_INTERFACE(type, unit, channel) \ +#define NFP_CPP_INTERFACE(type, unit, channel) \ ((((type) & 0xf) << 12) | \ (((unit) & 0xf) << 8) | \ (((channel) & 0xff) << 0)) -/* +/** * Get the interface type of a NFP Interface ID - * @param[in] interface NFP Interface ID - * @return NFP Interface ID's type + * + * @param interface + * NFP Interface ID + * + * @return + * NFP Interface ID's type */ #define NFP_CPP_INTERFACE_TYPE_of(interface) (((interface) >> 12) & 0xf) -/* +/** * Get the interface unit of a NFP Interface ID - * @param[in] interface NFP Interface ID - * @return NFP Interface ID's unit + * + * @param interface + * NFP Interface ID + * + * @return + * NFP Interface ID's unit */ #define NFP_CPP_INTERFACE_UNIT_of(interface) (((interface) >> 8) & 0xf) -/* +/** * Get the interface channel of a NFP Interface ID - * @param[in] interface NFP Interface ID - * @return NFP Interface ID's channel + * + * @param interface + * NFP Interface ID + * + * @return + * NFP Interface ID's channel */ #define NFP_CPP_INTERFACE_CHANNEL_of(interface) (((interface) >> 0) & 0xff) /* - * Retrieve the Interface ID of the NFP - * @param[in] cpp NFP CPP handle - * @return NFP CPP Interface ID + * Use this channel ID for multiple virtual channel interfaces + * (ie ARM and PCIe) when setting up the interface field. */ +#define NFP_CPP_INTERFACE_CHANNEL_PEROPENER 255 + uint16_t nfp_cpp_interface(struct nfp_cpp *cpp); -/* - * Retrieve the NFP Serial Number (unique per NFP) - * @param[in] cpp NFP CPP handle - * @param[out] serial Pointer to reference the serial number array - * - * @return size of the NFP6000 serial number, in bytes - */ -int nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial); +uint32_t nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial); + +bool nfp_cpp_driver_need_lock(const struct nfp_cpp *cpp); -/* - * Allocate a NFP CPP area handle, as an offset into a CPP ID - * @param[in] cpp NFP CPP handle - * @param[in] cpp_id NFP CPP ID - * @param[in] address Offset into the NFP CPP ID address space - * @param[in] size Size of the area to reserve - * - * @return NFP CPP handle, or NULL on failure. - */ struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t cpp_id, - unsigned long long address, - unsigned long size); + uint64_t address, size_t size); -/* - * Allocate a NFP CPP area handle, as an offset into a CPP ID, by a named owner - * @param[in] cpp NFP CPP handle - * @param[in] cpp_id NFP CPP ID - * @param[in] name Name of owner of the area - * @param[in] address Offset into the NFP CPP ID address space - * @param[in] size Size of the area to reserve - * - * @return NFP CPP handle, or NULL on failure. 
- */ struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, - uint32_t cpp_id, - const char *name, - unsigned long long address, - unsigned long size); + uint32_t cpp_id, const char *name, uint64_t address, + uint32_t size); -/* - * Free an allocated NFP CPP area handle - * @param[in] area NFP CPP area handle - */ void nfp_cpp_area_free(struct nfp_cpp_area *area); -/* - * Acquire the resources needed to access the NFP CPP area handle - * - * @param[in] area NFP CPP area handle - * - * @return 0 on success, -1 on failure. - */ int nfp_cpp_area_acquire(struct nfp_cpp_area *area); -/* - * Release the resources needed to access the NFP CPP area handle - * - * @param[in] area NFP CPP area handle - */ void nfp_cpp_area_release(struct nfp_cpp_area *area); -/* - * Allocate, then acquire the resources needed to access the NFP CPP area handle - * @param[in] cpp NFP CPP handle - * @param[in] cpp_id NFP CPP ID - * @param[in] address Offset into the NFP CPP ID address space - * @param[in] size Size of the area to reserve - * - * @return NFP CPP handle, or NULL on failure. - */ struct nfp_cpp_area *nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, - uint32_t cpp_id, - unsigned long long address, - unsigned long size); + uint32_t cpp_id, uint64_t address, size_t size); -/* - * Release the resources, then free the NFP CPP area handle - * @param[in] area NFP CPP area handle - */ void nfp_cpp_area_release_free(struct nfp_cpp_area *area); uint8_t *nfp_cpp_map_area(struct nfp_cpp *cpp, uint32_t cpp_id, - uint64_t addr, unsigned long size, - struct nfp_cpp_area **area); -/* - * Return an IO pointer to the beginning of the NFP CPP area handle. The area - * must be acquired with 'nfp_cpp_area_acquire()' before calling this operation. - * - * @param[in] area NFP CPP area handle - * - * @return Pointer to IO memory, or NULL on failure. - */ -void *nfp_cpp_area_mapped(struct nfp_cpp_area *area); + uint64_t addr, uint32_t size, struct nfp_cpp_area **area); -/* - * Read from a NFP CPP area handle into a buffer. The area must be acquired with - * 'nfp_cpp_area_acquire()' before calling this operation. - * - * @param[in] area NFP CPP area handle - * @param[in] offset Offset into the area - * @param[in] buffer Location of buffer to receive the data - * @param[in] length Length of the data to read - * - * @return bytes read on success, negative value on failure. - * - */ -int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset, - void *buffer, size_t length); +int nfp_cpp_area_read(struct nfp_cpp_area *area, uint32_t offset, + void *address, size_t length); -/* - * Write to a NFP CPP area handle from a buffer. The area must be acquired with - * 'nfp_cpp_area_acquire()' before calling this operation. - * - * @param[in] area NFP CPP area handle - * @param[in] offset Offset into the area - * @param[in] buffer Location of buffer that holds the data - * @param[in] length Length of the data to read - * - * @return bytes written on success, negative value on failure. - */ -int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset, - const void *buffer, size_t length); +int nfp_cpp_area_write(struct nfp_cpp_area *area, uint32_t offset, + const void *address, size_t length); -/* - * nfp_cpp_area_iomem() - get IOMEM region for CPP area - * @area: CPP area handle - * - * Returns an iomem pointer for use with readl()/writel() style operations. - * - * NOTE: Area must have been locked down with an 'acquire'. 
- * - * Return: pointer to the area, or NULL - */ void *nfp_cpp_area_iomem(struct nfp_cpp_area *area); -/* - * Verify that IO can be performed on an offset in an area - * - * @param[in] area NFP CPP area handle - * @param[in] offset Offset into the area - * @param[in] size Size of region to validate - * - * @return 0 on success, negative value on failure. - */ -int nfp_cpp_area_check_range(struct nfp_cpp_area *area, - unsigned long long offset, unsigned long size); - -/* - * Get the NFP CPP handle that is the parent of a NFP CPP area handle - * - * @param cpp_area NFP CPP area handle - * @return NFP CPP handle - */ struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area); -/* - * Get the name passed during allocation of the NFP CPP area handle - * - * @param cpp_area NFP CPP area handle - * @return Pointer to the area's name - */ const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area); -/* - * Read a block of data from a NFP CPP ID - * - * @param[in] cpp NFP CPP handle - * @param[in] cpp_id NFP CPP ID - * @param[in] address Offset into the NFP CPP ID address space - * @param[in] kernel_vaddr Buffer to copy read data to - * @param[in] length Size of the area to reserve - * - * @return bytes read on success, -1 on failure. - */ int nfp_cpp_read(struct nfp_cpp *cpp, uint32_t cpp_id, - unsigned long long address, void *kernel_vaddr, size_t length); + uint64_t address, void *buf, size_t length); -/* - * Write a block of data to a NFP CPP ID - * - * @param[in] cpp NFP CPP handle - * @param[in] cpp_id NFP CPP ID - * @param[in] address Offset into the NFP CPP ID address space - * @param[in] kernel_vaddr Buffer to copy write data from - * @param[in] length Size of the area to reserve - * - * @return bytes written on success, -1 on failure. - */ int nfp_cpp_write(struct nfp_cpp *cpp, uint32_t cpp_id, - unsigned long long address, const void *kernel_vaddr, - size_t length); - - + uint64_t address, const void *buf, size_t length); -/* - * Fill a NFP CPP area handle and offset with a value - * - * @param[in] area NFP CPP area handle - * @param[in] offset Offset into the NFP CPP ID address space - * @param[in] value 32-bit value to fill area with - * @param[in] length Size of the area to reserve - * - * @return bytes written on success, negative value on failure. - */ -int nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, - uint32_t value, size_t length); +int nfp_cpp_area_readl(struct nfp_cpp_area *area, uint32_t offset, + uint32_t *value); -/* - * Read a single 32-bit value from a NFP CPP area handle - * - * @param area NFP CPP area handle - * @param offset offset into NFP CPP area handle - * @param value output value - * - * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this - * operation. - * - * NOTE: offset must be 32-bit aligned. - * - * @return 0 on success, or -1 on error. - */ -int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset, - uint32_t *value); - -/* - * Write a single 32-bit value to a NFP CPP area handle - * - * @param area NFP CPP area handle - * @param offset offset into NFP CPP area handle - * @param value value to write - * - * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this - * operation. - * - * NOTE: offset must be 32-bit aligned. - * - * @return 0 on success, or -1 on error. 
- */ -int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, - uint32_t value); +int nfp_cpp_area_writel(struct nfp_cpp_area *area, uint32_t offset, + uint32_t value); -/* - * Read a single 64-bit value from a NFP CPP area handle - * - * @param area NFP CPP area handle - * @param offset offset into NFP CPP area handle - * @param value output value - * - * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this - * operation. - * - * NOTE: offset must be 64-bit aligned. - * - * @return 0 on success, or -1 on error. - */ -int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, - uint64_t *value); +int nfp_cpp_area_readq(struct nfp_cpp_area *area, uint32_t offset, + uint64_t *value); -/* - * Write a single 64-bit value to a NFP CPP area handle - * - * @param area NFP CPP area handle - * @param offset offset into NFP CPP area handle - * @param value value to write - * - * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this - * operation. - * - * NOTE: offset must be 64-bit aligned. - * - * @return 0 on success, or -1 on error. - */ -int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, - uint64_t value); +int nfp_cpp_area_writeq(struct nfp_cpp_area *area, uint32_t offset, + uint64_t value); -/* - * Write a single 32-bit value on the XPB bus - * - * @param cpp NFP CPP device handle - * @param xpb_tgt XPB target and address - * @param value value to write - * - * @return 0 on success, or -1 on failure. - */ int nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t value); -/* - * Read a single 32-bit value from the XPB bus - * - * @param cpp NFP CPP device handle - * @param xpb_tgt XPB target and address - * @param value output value - * - * @return 0 on success, or -1 on failure. - */ int nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t *value); -/* - * Modify bits of a 32-bit value from the XPB bus - * - * @param cpp NFP CPP device handle - * @param xpb_tgt XPB target and address - * @param mask mask of bits to alter - * @param value value to modify - * - * @return 0 on success, or -1 on failure. - */ -int nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, - uint32_t value); - -/* - * Modify bits of a 32-bit value from the XPB bus - * - * @param cpp NFP CPP device handle - * @param xpb_tgt XPB target and address - * @param mask mask of bits to alter - * @param value value to monitor for - * @param timeout_us maximum number of us to wait (-1 for forever) - * - * @return >= 0 on success, negative value on failure. - */ -int nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, - uint32_t value, int timeout_us); - -/* - * Read a 32-bit word from a NFP CPP ID - * - * @param cpp NFP CPP handle - * @param cpp_id NFP CPP ID - * @param address offset into the NFP CPP ID address space - * @param value output value - * - * @return 0 on success, or -1 on failure. - */ int nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id, - unsigned long long address, uint32_t *value); + uint64_t address, uint32_t *value); -/* - * Write a 32-bit value to a NFP CPP ID - * - * @param cpp NFP CPP handle - * @param cpp_id NFP CPP ID - * @param address offset into the NFP CPP ID address space - * @param value value to write - * - * @return 0 on success, or -1 on failure. 
- * - */ int nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id, - unsigned long long address, uint32_t value); + uint64_t address, uint32_t value); -/* - * Read a 64-bit work from a NFP CPP ID - * - * @param cpp NFP CPP handle - * @param cpp_id NFP CPP ID - * @param address offset into the NFP CPP ID address space - * @param value output value - * - * @return 0 on success, or -1 on failure. - */ int nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id, - unsigned long long address, uint64_t *value); + uint64_t address, uint64_t *value); -/* - * Write a 64-bit value to a NFP CPP ID - * - * @param cpp NFP CPP handle - * @param cpp_id NFP CPP ID - * @param address offset into the NFP CPP ID address space - * @param value value to write - * - * @return 0 on success, or -1 on failure. - */ int nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id, - unsigned long long address, uint64_t value); - -/* - * Initialize a mutex location - - * The CPP target:address must point to a 64-bit aligned location, and will - * initialize 64 bits of data at the location. - * - * This creates the initial mutex state, as locked by this nfp_cpp_interface(). - * - * This function should only be called when setting up the initial lock state - * upon boot-up of the system. - * - * @param cpp NFP CPP handle - * @param target NFP CPP target ID - * @param address Offset into the address space of the NFP CPP target ID - * @param key_id Unique 32-bit value for this mutex - * - * @return 0 on success, negative value on failure. - */ -int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, - unsigned long long address, uint32_t key_id); - -/* - * Create a mutex handle from an address controlled by a MU Atomic engine - * - * The CPP target:address must point to a 64-bit aligned location, and reserve - * 64 bits of data at the location for use by the handle. - * - * Only target/address pairs that point to entities that support the MU Atomic - * Engine's CmpAndSwap32 command are supported. - * - * @param cpp NFP CPP handle - * @param target NFP CPP target ID - * @param address Offset into the address space of the NFP CPP target ID - * @param key_id 32-bit unique key (must match the key at this location) - * - * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on - * failure. - */ -struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, - unsigned long long address, - uint32_t key_id); - -/* - * Get the NFP CPP handle the mutex was created with - * - * @param mutex NFP mutex handle - * @return NFP CPP handle - */ -struct nfp_cpp *nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex); - -/* - * Get the mutex key - * - * @param mutex NFP mutex handle - * @return Mutex key - */ -uint32_t nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex); - -/* - * Get the mutex owner - * - * @param mutex NFP mutex handle - * @return Interface ID of the mutex owner - * - * NOTE: This is for debug purposes ONLY - the owner may change at any time, - * unless it has been locked by this NFP CPP handle. 
- */ -uint16_t nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex); - -/* - * Get the mutex target - * - * @param mutex NFP mutex handle - * @return Mutex CPP target (ie NFP_CPP_TARGET_MU) - */ -int nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex); - -/* - * Get the mutex address - * - * @param mutex NFP mutex handle - * @return Mutex CPP address - */ -uint64_t nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex); - -/* - * Free a mutex handle - does not alter the lock state - * - * @param mutex NFP CPP Mutex handle - */ -void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex); - -/* - * Lock a mutex handle, using the NFP MU Atomic Engine - * - * @param mutex NFP CPP Mutex handle - * - * @return 0 on success, negative value on failure. - */ -int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex); - -/* - * Unlock a mutex handle, using the NFP MU Atomic Engine - * - * @param mutex NFP CPP Mutex handle - * - * @return 0 on success, negative value on failure. - */ -int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex); - -/* - * Attempt to lock a mutex handle, using the NFP MU Atomic Engine - * - * @param mutex NFP CPP Mutex handle - * @return 0 if the lock succeeded, negative value on failure. - */ -int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex); + uint64_t address, uint64_t value); uint32_t nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp); -#endif /* !__NFP_CPP_H__ */ +#endif /* __NFP_CPP_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c deleted file mode 100644 index 658c618ee68..00000000000 --- a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c +++ /dev/null @@ -1,824 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2018 Netronome Systems, Inc. - * All rights reserved. - */ - -/* - * nfp_cpp_pcie_ops.c - * Authors: Vinayak Tammineedi - * - * Multiplexes the NFP BARs between NFP internal resources and - * implements the PCIe specific interface for generic CPP bus access. - * - * The BARs are managed and allocated if they are available. - * The generic CPP bus abstraction builds upon this BAR interface. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#include "nfp_cpp.h" -#include "nfp_logs.h" -#include "nfp6000/nfp6000.h" -#include "../nfp_logs.h" - -#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0) - -#define NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(_x) (((_x) & 0x1f) << 16) -#define NFP_PCIE_BAR_PCIE2CPP_BASEADDRESS(_x) (((_x) & 0xffff) << 0) -#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT(_x) (((_x) & 0x3) << 27) -#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT 0 -#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT 1 -#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE 3 -#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE(_x) (((_x) & 0x7) << 29) -#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(_x) (((_x) >> 29) & 0x7) -#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED 0 -#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK 1 -#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_TARGET 2 -#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL 3 -#define NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(_x) (((_x) & 0xf) << 23) -#define NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(_x) (((_x) & 0x3) << 21) - -/* - * Minimal size of the PCIe cfg memory we depend on being mapped, - * queue controller and DMA controller don't have to be covered. 
- */ -#define NFP_PCI_MIN_MAP_SIZE 0x080000 - -#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize) -#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize) -#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2)) -#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4)) -#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4)) - -#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(id, bar, slot) \ - (NFP_PCIE_BAR(id) + ((bar) * 8 + (slot)) * 4) - -#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPBAR(bar, slot) \ - (((bar) * 8 + (slot)) * 4) - -/* - * Define to enable a bit more verbose debug output. - * Set to 1 to enable a bit more verbose debug output. - */ -struct nfp_pcie_user; -struct nfp6000_area_priv; - -/* - * struct nfp_bar - describes BAR configuration and usage - * @nfp: backlink to owner - * @barcfg: cached contents of BAR config CSR - * @base: the BAR's base CPP offset - * @mask: mask for the BAR aperture (read only) - * @bitsize: bitsize of BAR aperture (read only) - * @index: index of the BAR - * @lock: lock to specify if bar is in use - * @refcnt: number of current users - * @iomem: mapped IO memory - */ -#define NFP_BAR_MIN 1 -#define NFP_BAR_MID 5 -#define NFP_BAR_MAX 7 - -struct nfp_bar { - struct nfp_pcie_user *nfp; - uint32_t barcfg; - uint64_t base; /* CPP address base */ - uint64_t mask; /* Bit mask of the bar */ - uint32_t bitsize; /* Bit size of the bar */ - int index; - int lock; - - char *csr; - char *iomem; -}; - -#define BUSDEV_SZ 13 -struct nfp_pcie_user { - struct nfp_bar bar[NFP_BAR_MAX]; - - int device; - int lock; - char busdev[BUSDEV_SZ]; - int barsz; - int dev_id; - char *cfg; -}; - -static uint32_t -nfp_bar_maptype(struct nfp_bar *bar) -{ - return NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(bar->barcfg); -} - -#define TARGET_WIDTH_32 4 -#define TARGET_WIDTH_64 8 - -static int -nfp_compute_bar(const struct nfp_bar *bar, uint32_t *bar_config, - uint64_t *bar_base, int tgt, int act, int tok, - uint64_t offset, size_t size, int width) -{ - uint32_t bitsize; - uint32_t newcfg; - uint64_t mask; - - if (tgt >= 16) - return -EINVAL; - - switch (width) { - case 8: - newcfg = - NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT - (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT); - break; - case 4: - newcfg = - NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT - (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT); - break; - case 0: - newcfg = - NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT - (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE); - break; - default: - return -EINVAL; - } - - if (act != NFP_CPP_ACTION_RW && act != 0) { - /* Fixed CPP mapping with specific action */ - mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1); - - newcfg |= - NFP_PCIE_BAR_PCIE2CPP_MAPTYPE - (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED); - newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt); - newcfg |= NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(act); - newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok); - - if ((offset & mask) != ((offset + size - 1) & mask)) { - PMD_DRV_LOG(ERR, "BAR%d: Won't use for Fixed mapping <%#llx,%#llx>, action=%d BAR too small (0x%llx)", - bar->index, (unsigned long long)offset, - (unsigned long long)(offset + size), act, - (unsigned long long)mask); - return -EINVAL; - } - offset &= mask; - - PMD_DRV_LOG(DEBUG, "BAR%d: Created Fixed mapping %d:%d:%d:0x%#llx-0x%#llx>", - bar->index, tgt, act, tok, (unsigned long long)offset, - (unsigned long long)(offset + mask)); - - bitsize = 40 - 16; - } else { - mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1); - - /* Bulk mapping */ - newcfg |= - 
NFP_PCIE_BAR_PCIE2CPP_MAPTYPE - (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK); - - newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt); - newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok); - - if ((offset & mask) != ((offset + size - 1) & mask)) { - PMD_DRV_LOG(ERR, "BAR%d: Won't use for bulk mapping <%#llx,%#llx> target=%d, token=%d BAR too small (%#llx) - (%#llx != %#llx).", - bar->index, (unsigned long long)offset, - (unsigned long long)(offset + size), - tgt, tok, (unsigned long long)mask, - (unsigned long long)(offset & mask), - (unsigned long long)(offset + size - 1) & mask); - return -EINVAL; - } - - offset &= mask; - - PMD_DRV_LOG(DEBUG, "BAR%d: Created bulk mapping %d:x:%d:%#llx-%#llx", - bar->index, tgt, tok, (unsigned long long)offset, - (unsigned long long)(offset + ~mask)); - - bitsize = 40 - 21; - } - - if (bar->bitsize < bitsize) { - PMD_DRV_LOG(ERR, "BAR%d: Too small for %d:%d:%d", bar->index, - tgt, tok, act); - return -EINVAL; - } - - newcfg |= offset >> bitsize; - - if (bar_base) - *bar_base = offset; - - if (bar_config) - *bar_config = newcfg; - - return 0; -} - -static int -nfp_bar_write(struct nfp_pcie_user *nfp, struct nfp_bar *bar, - uint32_t newcfg) -{ - int base, slot; - - base = bar->index >> 3; - slot = bar->index & 7; - - if (nfp->cfg == NULL) - return (-ENOMEM); - - bar->csr = nfp->cfg + - NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(nfp->dev_id, base, slot); - - *(uint32_t *)(bar->csr) = newcfg; - - bar->barcfg = newcfg; - PMD_DRV_LOG(DEBUG, "BAR%d: updated to 0x%08x", bar->index, newcfg); - - return 0; -} - -static int -nfp_reconfigure_bar(struct nfp_pcie_user *nfp, struct nfp_bar *bar, int tgt, - int act, int tok, uint64_t offset, size_t size, int width) -{ - uint64_t newbase; - uint32_t newcfg; - int err; - - err = nfp_compute_bar(bar, &newcfg, &newbase, tgt, act, tok, offset, - size, width); - if (err) - return err; - - bar->base = newbase; - - return nfp_bar_write(nfp, bar, newcfg); -} - -/* - * Map all PCI bars. We assume that the BAR with the PCIe config block is - * already mapped. - * - * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM) - * - * Halving PCItoCPPBars for primary and secondary processes. - * For CoreNIC firmware: - * NFP PMD just requires two fixed slots, one for configuration BAR, - * and another for accessing the hw queues. Another slot is needed - * for setting the link up or down. Secondary processes do not need - * to map the first two slots again, but it requires one slot for - * accessing the link, even if it is not likely the secondary process - * starting the port. - * For Flower firmware: - * NFP PMD need another fixed slots, used as the configureation BAR - * for ctrl vNIC. 
- */ -static int -nfp_enable_bars(struct nfp_pcie_user *nfp) -{ - struct nfp_bar *bar; - int x, start, end; - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - start = NFP_BAR_MID; - end = NFP_BAR_MIN; - } else { - start = NFP_BAR_MAX; - end = NFP_BAR_MID; - } - for (x = start; x > end; x--) { - bar = &nfp->bar[x - 1]; - bar->barcfg = 0; - bar->nfp = nfp; - bar->index = x; - bar->mask = (1 << (nfp->barsz - 3)) - 1; - bar->bitsize = nfp->barsz - 3; - bar->base = 0; - bar->iomem = NULL; - bar->lock = 0; - bar->csr = nfp->cfg + NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(nfp->dev_id, - bar->index >> 3, bar->index & 7); - bar->iomem = nfp->cfg + (bar->index << bar->bitsize); - } - return 0; -} - -static struct nfp_bar * -nfp_alloc_bar(struct nfp_pcie_user *nfp) -{ - struct nfp_bar *bar; - int x, start, end; - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - start = NFP_BAR_MID; - end = NFP_BAR_MIN; - } else { - start = NFP_BAR_MAX; - end = NFP_BAR_MID; - } - for (x = start; x > end; x--) { - bar = &nfp->bar[x - 1]; - if (bar->lock == 0) { - bar->lock = 1; - return bar; - } - } - return NULL; -} - -static void -nfp_disable_bars(struct nfp_pcie_user *nfp) -{ - struct nfp_bar *bar; - int x, start, end; - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - start = NFP_BAR_MID; - end = NFP_BAR_MIN; - } else { - start = NFP_BAR_MAX; - end = NFP_BAR_MID; - } - - for (x = start; x > end; x--) { - bar = &nfp->bar[x - 1]; - if (bar->iomem) { - bar->iomem = NULL; - bar->lock = 0; - } - } -} - -/* - * Generic CPP bus access interface. - */ - -struct nfp6000_area_priv { - struct nfp_bar *bar; - uint32_t bar_offset; - - uint32_t target; - uint32_t action; - uint32_t token; - uint64_t offset; - struct { - int read; - int write; - int bar; - } width; - size_t size; - char *iomem; -}; - -static int -nfp6000_area_init(struct nfp_cpp_area *area, uint32_t dest, - unsigned long long address, unsigned long size) -{ - struct nfp_pcie_user *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area)); - struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); - uint32_t target = NFP_CPP_ID_TARGET_of(dest); - uint32_t action = NFP_CPP_ID_ACTION_of(dest); - uint32_t token = NFP_CPP_ID_TOKEN_of(dest); - int pp, ret = 0; - - pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), - address); - if (pp < 0) - return pp; - - priv->width.read = PUSH_WIDTH(pp); - priv->width.write = PULL_WIDTH(pp); - - if (priv->width.read > 0 && - priv->width.write > 0 && priv->width.read != priv->width.write) - return -EINVAL; - - if (priv->width.read > 0) - priv->width.bar = priv->width.read; - else - priv->width.bar = priv->width.write; - - priv->bar = nfp_alloc_bar(nfp); - if (priv->bar == NULL) - return -ENOMEM; - - priv->target = target; - priv->action = action; - priv->token = token; - priv->offset = address; - priv->size = size; - - ret = nfp_reconfigure_bar(nfp, priv->bar, priv->target, priv->action, - priv->token, priv->offset, priv->size, - priv->width.bar); - - return ret; -} - -static int -nfp6000_area_acquire(struct nfp_cpp_area *area) -{ - struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); - - /* Calculate offset into BAR. 
*/ - if (nfp_bar_maptype(priv->bar) == - NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL) { - priv->bar_offset = priv->offset & - (NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1); - priv->bar_offset += - NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(priv->bar, - priv->target); - priv->bar_offset += - NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(priv->bar, priv->token); - } else { - priv->bar_offset = priv->offset & priv->bar->mask; - } - - /* Must have been too big. Sub-allocate. */ - if (priv->bar->iomem == NULL) - return (-ENOMEM); - - priv->iomem = priv->bar->iomem + priv->bar_offset; - - return 0; -} - -static void * -nfp6000_area_mapped(struct nfp_cpp_area *area) -{ - struct nfp6000_area_priv *area_priv = nfp_cpp_area_priv(area); - - if (area_priv->iomem == NULL) - return NULL; - - return area_priv->iomem; -} - -static void -nfp6000_area_release(struct nfp_cpp_area *area) -{ - struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); - priv->bar->lock = 0; - priv->bar = NULL; - priv->iomem = NULL; -} - -static void * -nfp6000_area_iomem(struct nfp_cpp_area *area) -{ - struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); - return priv->iomem; -} - -static int -nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr, - unsigned long offset, unsigned int length) -{ - uint64_t *wrptr64 = kernel_vaddr; - const volatile uint64_t *rdptr64; - struct nfp6000_area_priv *priv; - uint32_t *wrptr32 = kernel_vaddr; - const volatile uint32_t *rdptr32; - int width; - unsigned int n; - bool is_64; - - priv = nfp_cpp_area_priv(area); - rdptr64 = (uint64_t *)(priv->iomem + offset); - rdptr32 = (uint32_t *)(priv->iomem + offset); - - if (offset + length > priv->size) - return -EFAULT; - - width = priv->width.read; - - if (width <= 0) - return -EINVAL; - - /* Unaligned? Translate to an explicit access */ - if ((priv->offset + offset) & (width - 1)) { - PMD_DRV_LOG(ERR, "aread_read unaligned!!!"); - return -EINVAL; - } - - is_64 = width == TARGET_WIDTH_64; - - /* MU reads via a PCIe2CPP BAR supports 32bit (and other) lengths */ - if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && - priv->action == NFP_CPP_ACTION_RW) { - is_64 = false; - } - - if (is_64) { - if (offset % sizeof(uint64_t) != 0 || - length % sizeof(uint64_t) != 0) - return -EINVAL; - } else { - if (offset % sizeof(uint32_t) != 0 || - length % sizeof(uint32_t) != 0) - return -EINVAL; - } - - if (priv->bar == NULL) - return -EFAULT; - - if (is_64) - for (n = 0; n < length; n += sizeof(uint64_t)) { - *wrptr64 = *rdptr64; - wrptr64++; - rdptr64++; - } - else - for (n = 0; n < length; n += sizeof(uint32_t)) { - *wrptr32 = *rdptr32; - wrptr32++; - rdptr32++; - } - - return n; -} - -static int -nfp6000_area_write(struct nfp_cpp_area *area, const void *kernel_vaddr, - unsigned long offset, unsigned int length) -{ - const uint64_t *rdptr64 = kernel_vaddr; - uint64_t *wrptr64; - const uint32_t *rdptr32 = kernel_vaddr; - struct nfp6000_area_priv *priv; - uint32_t *wrptr32; - int width; - unsigned int n; - bool is_64; - - priv = nfp_cpp_area_priv(area); - wrptr64 = (uint64_t *)(priv->iomem + offset); - wrptr32 = (uint32_t *)(priv->iomem + offset); - - if (offset + length > priv->size) - return -EFAULT; - - width = priv->width.write; - - if (width <= 0) - return -EINVAL; - - /* Unaligned? 
Translate to an explicit access */ - if ((priv->offset + offset) & (width - 1)) - return -EINVAL; - - is_64 = width == TARGET_WIDTH_64; - - /* MU writes via a PCIe2CPP BAR supports 32bit (and other) lengths */ - if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && - priv->action == NFP_CPP_ACTION_RW) - is_64 = false; - - if (is_64) { - if (offset % sizeof(uint64_t) != 0 || - length % sizeof(uint64_t) != 0) - return -EINVAL; - } else { - if (offset % sizeof(uint32_t) != 0 || - length % sizeof(uint32_t) != 0) - return -EINVAL; - } - - if (priv->bar == NULL) - return -EFAULT; - - if (is_64) - for (n = 0; n < length; n += sizeof(uint64_t)) { - *wrptr64 = *rdptr64; - wrptr64++; - rdptr64++; - } - else - for (n = 0; n < length; n += sizeof(uint32_t)) { - *wrptr32 = *rdptr32; - wrptr32++; - rdptr32++; - } - - return n; -} - -#define PCI_DEVICES "/sys/bus/pci/devices" - -static int -nfp_acquire_process_lock(struct nfp_pcie_user *desc) -{ - int rc; - struct flock lock; - char lockname[30]; - - memset(&lock, 0, sizeof(lock)); - - snprintf(lockname, sizeof(lockname), "/var/lock/nfp_%s", desc->busdev); - desc->lock = open(lockname, O_RDWR | O_CREAT, 0666); - if (desc->lock < 0) - return desc->lock; - - lock.l_type = F_WRLCK; - lock.l_whence = SEEK_SET; - rc = -1; - while (rc != 0) { - rc = fcntl(desc->lock, F_SETLKW, &lock); - if (rc < 0) { - if (errno != EAGAIN && errno != EACCES) { - close(desc->lock); - return rc; - } - } - } - - return 0; -} - -static int -nfp6000_set_model(struct rte_pci_device *dev, struct nfp_cpp *cpp) -{ - uint32_t model; - - if (rte_pci_read_config(dev, &model, 4, 0x2e) < 0) { - PMD_DRV_LOG(ERR, "nfp set model failed"); - return -1; - } - - model = model << 16; - nfp_cpp_model_set(cpp, model); - - return 0; -} - -static int -nfp6000_set_interface(struct rte_pci_device *dev, struct nfp_cpp *cpp) -{ - uint16_t interface; - - if (rte_pci_read_config(dev, &interface, 2, 0x154) < 0) { - PMD_DRV_LOG(ERR, "nfp set interface failed"); - return -1; - } - - nfp_cpp_interface_set(cpp, interface); - - return 0; -} - -static int -nfp6000_set_serial(struct rte_pci_device *dev, struct nfp_cpp *cpp) -{ - uint16_t tmp; - uint8_t serial[6]; - int serial_len = 6; - off_t pos; - - pos = rte_pci_find_ext_capability(dev, RTE_PCI_EXT_CAP_ID_DSN); - if (pos <= 0) { - PMD_DRV_LOG(ERR, "PCI_EXT_CAP_ID_DSN not found. 
nfp set serial failed"); - return -1; - } else { - pos += 6; - } - - if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) { - PMD_DRV_LOG(ERR, "nfp set serial failed"); - return -1; - } - - serial[4] = (uint8_t)((tmp >> 8) & 0xff); - serial[5] = (uint8_t)(tmp & 0xff); - - pos += 2; - if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) { - PMD_DRV_LOG(ERR, "nfp set serial failed"); - return -1; - } - - serial[2] = (uint8_t)((tmp >> 8) & 0xff); - serial[3] = (uint8_t)(tmp & 0xff); - - pos += 2; - if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) { - PMD_DRV_LOG(ERR, "nfp set serial failed"); - return -1; - } - - serial[0] = (uint8_t)((tmp >> 8) & 0xff); - serial[1] = (uint8_t)(tmp & 0xff); - - nfp_cpp_serial_set(cpp, serial, serial_len); - - return 0; -} - -static int -nfp6000_set_barsz(struct rte_pci_device *dev, struct nfp_pcie_user *desc) -{ - unsigned long tmp; - int i = 0; - - tmp = dev->mem_resource[0].len; - - while (tmp >>= 1) - i++; - - desc->barsz = i; - return 0; -} - -static int -nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev) -{ - int ret = 0; - struct nfp_pcie_user *desc; - - desc = malloc(sizeof(*desc)); - if (desc == NULL) - return -1; - - - memset(desc->busdev, 0, BUSDEV_SZ); - strlcpy(desc->busdev, dev->device.name, sizeof(desc->busdev)); - - if (rte_eal_process_type() == RTE_PROC_PRIMARY && - cpp->driver_lock_needed) { - ret = nfp_acquire_process_lock(desc); - if (ret) - goto error; - } - - if (nfp6000_set_model(dev, cpp) < 0) - goto error; - if (nfp6000_set_interface(dev, cpp) < 0) - goto error; - if (nfp6000_set_serial(dev, cpp) < 0) - goto error; - if (nfp6000_set_barsz(dev, desc) < 0) - goto error; - - desc->cfg = dev->mem_resource[0].addr; - desc->dev_id = dev->addr.function & 0x7; - - nfp_enable_bars(desc); - - nfp_cpp_priv_set(cpp, desc); - - return 0; - -error: - free(desc); - return -1; -} - -static void -nfp6000_free(struct nfp_cpp *cpp) -{ - struct nfp_pcie_user *desc = nfp_cpp_priv(cpp); - - nfp_disable_bars(desc); - if (cpp->driver_lock_needed) - close(desc->lock); - close(desc->device); - free(desc); -} - -static const struct nfp_cpp_operations nfp6000_pcie_ops = { - .init = nfp6000_init, - .free = nfp6000_free, - - .area_priv_size = sizeof(struct nfp6000_area_priv), - .area_init = nfp6000_area_init, - .area_acquire = nfp6000_area_acquire, - .area_release = nfp6000_area_release, - .area_mapped = nfp6000_area_mapped, - .area_read = nfp6000_area_read, - .area_write = nfp6000_area_write, - .area_iomem = nfp6000_area_iomem, -}; - -const struct -nfp_cpp_operations *nfp_cpp_transport_operations(void) -{ - return &nfp6000_pcie_ops; -} diff --git a/drivers/net/nfp/nfpcore/nfp_cppcore.c b/drivers/net/nfp/nfpcore/nfp_cppcore.c index 6daee313cec..f9b08a12b6d 100644 --- a/drivers/net/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/nfp/nfpcore/nfp_cppcore.c @@ -3,47 +3,111 @@ * All rights reserved. 
*/ -#include -#include -#include -#include -#include -#include -#include - -#include -#include - #include "nfp_cpp.h" + #include "nfp_logs.h" +#include "nfp_platform.h" +#include "nfp_target.h" #include "nfp6000/nfp6000.h" #include "nfp6000/nfp_xpb.h" -#include "nfp_nffw.h" +#include "nfp6000_pcie.h" +#define NFP_PL_DEVICE_PART_NFP6000 0x6200 #define NFP_PL_DEVICE_ID 0x00000004 #define NFP_PL_DEVICE_ID_MASK 0xff #define NFP_PL_DEVICE_PART_MASK 0xffff0000 #define NFP_PL_DEVICE_MODEL_MASK (NFP_PL_DEVICE_PART_MASK | \ NFP_PL_DEVICE_ID_MASK) +/* NFP CPP handle */ +struct nfp_cpp { + void *priv; /**< Private data of the low-level implementation */ + + uint32_t model; /**< Chip model */ + uint16_t interface; /**< Chip interface id */ + uint8_t serial[NFP_SERIAL_LEN]; /**< Chip serial number */ + + /** Low-level implementation ops */ + const struct nfp_cpp_operations *op; + + /* + * NFP-6xxx originating island IMB CPP Address Translation. CPP Target + * ID is index into array. Values are obtained at runtime from local + * island XPB CSRs. + */ + uint32_t imb_cat_table[16]; + + /**< MU access type bit offset */ + uint32_t mu_locality_lsb; + + bool driver_lock_needed; +}; + +/* NFP CPP device area handle */ +struct nfp_cpp_area { + struct nfp_cpp *cpp; + char *name; + uint64_t offset; + uint32_t size; + /* Here follows the 'priv' part of nfp_cpp_area. */ + /* Here follows the ASCII name, pointed by @name */ +}; + +/** + * Set the private data of the nfp_cpp instance + * + * @param cpp + * NFP CPP operations structure + * + * @return + * Opaque device pointer + */ void -nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv) +nfp_cpp_priv_set(struct nfp_cpp *cpp, + void *priv) { cpp->priv = priv; } +/** + * Return the private data of the nfp_cpp instance + * + * @param cpp + * NFP CPP operations structure + * + * @return + * Opaque device pointer + */ void * nfp_cpp_priv(struct nfp_cpp *cpp) { return cpp->priv; } +/** + * Set the model id + * + * @param cpp + * NFP CPP operations structure + * @param model + * Model ID + */ void -nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model) +nfp_cpp_model_set(struct nfp_cpp *cpp, + uint32_t model) { cpp->model = model; } +/** + * Retrieve the Model ID of the NFP + * + * @param cpp + * NFP CPP handle + * + * @return + * NFP CPP Model ID + */ uint32_t nfp_cpp_model(struct nfp_cpp *cpp) { @@ -53,7 +117,7 @@ nfp_cpp_model(struct nfp_cpp *cpp) if (cpp == NULL) return NFP_CPP_MODEL_INVALID; - err = __nfp_cpp_model_autodetect(cpp, &model); + err = nfp_cpp_model_autodetect(cpp, &model); if (err < 0) return err; @@ -61,36 +125,68 @@ nfp_cpp_model(struct nfp_cpp *cpp) return model; } +/** + * Set the private instance owned data of a nfp_cpp struct + * + * @param cpp + * NFP CPP operations structure + * @param interface + * Interface ID + */ void -nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface) +nfp_cpp_interface_set(struct nfp_cpp *cpp, + uint32_t interface) { cpp->interface = interface; } -int -nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial) +/** + * Retrieve the Serial ID of the NFP + * + * @param cpp + * NFP CPP handle + * @param serial + * Pointer to NFP serial number + * + * @return + * Length of NFP serial number + */ +uint32_t +nfp_cpp_serial(struct nfp_cpp *cpp, + const uint8_t **serial) { - *serial = cpp->serial; - return cpp->serial_len; + *serial = &cpp->serial[0]; + + return sizeof(cpp->serial); } -int -nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial, - size_t serial_len) +/** + * Set the private instance owned data of 
a nfp_cpp struct + * + * @param cpp + * NFP CPP operations structure + * @param serial + * NFP serial byte array + * @param serial_len + * Length of the serial byte array + */ +void +nfp_cpp_serial_set(struct nfp_cpp *cpp, + const uint8_t *serial, + size_t serial_len) { - if (cpp->serial_len) - free(cpp->serial); - - cpp->serial = malloc(serial_len); - if (cpp->serial == NULL) - return -1; - memcpy(cpp->serial, serial, serial_len); - cpp->serial_len = serial_len; - - return 0; } +/** + * Retrieve the Interface ID of the NFP + * + * @param cpp + * NFP CPP handle + * + * @return + * NFP CPP Interface ID + */ uint16_t nfp_cpp_interface(struct nfp_cpp *cpp) { @@ -100,18 +196,60 @@ nfp_cpp_interface(struct nfp_cpp *cpp) return cpp->interface; } +/** + * Retrieve the driver need lock flag + * + * @param cpp + * NFP CPP handle + * + * @return + * The driver need lock flag + */ +bool +nfp_cpp_driver_need_lock(const struct nfp_cpp *cpp) +{ + return cpp->driver_lock_needed; +} + +/** + * Get the privately allocated portion of a NFP CPP area handle + * + * @param cpp_area + * NFP CPP area handle + * + * @return + * Pointer to the private area, or NULL on failure + */ void * nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area) { return &cpp_area[1]; } +/** + * Get the NFP CPP handle that is the pci_dev of a NFP CPP area handle + * + * @param cpp_area + * NFP CPP area handle + * + * @return + * NFP CPP handle + */ struct nfp_cpp * nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area) { return cpp_area->cpp; } +/** + * Get the name passed during allocation of the NFP CPP area handle + * + * @param cpp_area + * NFP CPP area handle + * + * @return + * Pointer to the area's name + */ const char * nfp_cpp_area_name(struct nfp_cpp_area *cpp_area) { @@ -148,94 +286,138 @@ nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp) return cpp->mu_locality_lsb; } -/* - * nfp_cpp_area_alloc - allocate a new CPP area - * @cpp: CPP handle - * @dest: CPP id - * @address: start address on CPP target - * @size: size of area in bytes +/** + * Allocate and initialize a CPP area structure. + * The area must later be locked down with an 'acquire' before + * it can be safely accessed. * - * Allocate and initialize a CPP area structure. The area must later - * be locked down with an 'acquire' before it can be safely accessed. + * @param cpp + * CPP device handle + * @param dest + * CPP id + * @param name + * Name of region + * @param address + * Address of region + * @param size + * Size of region + * + * @return + * NFP CPP area handle, or NULL * * NOTE: @address and @size must be 32-bit aligned values. 
*/ struct nfp_cpp_area * -nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, uint32_t dest, - const char *name, unsigned long long address, - unsigned long size) +nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, + uint32_t dest, + const char *name, + uint64_t address, + uint32_t size) { - struct nfp_cpp_area *area; - uint64_t tmp64 = (uint64_t)address; int err; + size_t name_len; + uint32_t target_id; + uint64_t target_addr; + struct nfp_cpp_area *area; if (cpp == NULL) return NULL; /* Remap from cpp_island to cpp_target */ - err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table); + err = nfp_target_cpp(dest, address, &target_id, &target_addr, + cpp->imb_cat_table); if (err < 0) return NULL; - address = (unsigned long long)tmp64; - if (name == NULL) - name = ""; + name = "(reserved)"; - area = calloc(1, sizeof(*area) + cpp->op->area_priv_size + - strlen(name) + 1); + name_len = strlen(name) + 1; + area = calloc(1, sizeof(*area) + cpp->op->area_priv_size + name_len); if (area == NULL) return NULL; area->cpp = cpp; area->name = ((char *)area) + sizeof(*area) + cpp->op->area_priv_size; - memcpy(area->name, name, strlen(name) + 1); + memcpy(area->name, name, name_len); - err = cpp->op->area_init(area, dest, address, size); + err = cpp->op->area_init(area, target_id, target_addr, size); if (err < 0) { + PMD_DRV_LOG(ERR, "Area init op failed"); free(area); return NULL; } - area->offset = address; + area->offset = target_addr; area->size = size; return area; } +/** + * Allocate and initialize a CPP area structure. + * The area must later be locked down with an 'acquire' before + * it can be safely accessed. + * + * @param cpp + * CPP device handle + * @param dest + * CPP id + * @param address + * Address of region + * @param size + * Size of region + * + * @return + * NFP CPP area handle, or NULL + * + * NOTE: @address and @size must be 32-bit aligned values. + */ struct nfp_cpp_area * -nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t dest, - unsigned long long address, unsigned long size) +nfp_cpp_area_alloc(struct nfp_cpp *cpp, + uint32_t dest, + uint64_t address, + size_t size) { return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size); } -/* - * nfp_cpp_area_alloc_acquire - allocate a new CPP area and lock it down - * - * @cpp: CPP handle - * @dest: CPP id - * @address: start address on CPP target - * @size: size of area - * +/** * Allocate and initialize a CPP area structure, and lock it down so * that it can be accessed directly. * + * @param cpp + * CPP device handle + * @param destination + * CPP id + * @param address + * Address of region + * @param size + * Size of region + * + * @return + * NFP CPP area handle, or NULL + * * NOTE: @address and @size must be 32-bit aligned values. * * NOTE: The area must also be 'released' when the structure is freed. 
*/ struct nfp_cpp_area * -nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, uint32_t destination, - unsigned long long address, unsigned long size) +nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, + uint32_t destination, + uint64_t address, + size_t size) { struct nfp_cpp_area *area; area = nfp_cpp_area_alloc(cpp, destination, address, size); - if (area == NULL) + if (area == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate CPP area"); return NULL; + } - if (nfp_cpp_area_acquire(area)) { + if (nfp_cpp_area_acquire(area) != 0) { + PMD_DRV_LOG(ERR, "Failed to acquire CPP area"); nfp_cpp_area_free(area); return NULL; } @@ -243,25 +425,25 @@ nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, uint32_t destination, return area; } -/* - * nfp_cpp_area_free - free up the CPP area - * area: CPP area handle - * +/** * Frees up memory resources held by the CPP area. + * + * @param area + * CPP area handle */ void nfp_cpp_area_free(struct nfp_cpp_area *area) { - if (area->cpp->op->area_cleanup) + if (area->cpp->op->area_cleanup != NULL) area->cpp->op->area_cleanup(area); free(area); } -/* - * nfp_cpp_area_release_free - release CPP area and free it - * area: CPP area handle +/** + * Releases CPP area and frees up memory resources held by it. * - * Releases CPP area and frees up memory resources held by the it. + * @param area + * CPP area handle */ void nfp_cpp_area_release_free(struct nfp_cpp_area *area) @@ -270,135 +452,123 @@ nfp_cpp_area_release_free(struct nfp_cpp_area *area) nfp_cpp_area_free(area); } -/* - * nfp_cpp_area_acquire - lock down a CPP area for access - * @area: CPP area handle +/** + * Locks down the CPP area for a potential long term activity. + * Area must always be locked down before being accessed. + * + * @param area + * CPP area handle * - * Locks down the CPP area for a potential long term activity. Area - * must always be locked down before being accessed. + * @return + * 0 on success, -1 on failure. */ int nfp_cpp_area_acquire(struct nfp_cpp_area *area) { - if (area->cpp->op->area_acquire) { + if (area->cpp->op->area_acquire != NULL) { int err = area->cpp->op->area_acquire(area); - - if (err < 0) + if (err < 0) { + PMD_DRV_LOG(ERR, "Area acquire op failed"); return -1; + } } return 0; } -/* - * nfp_cpp_area_release - release a locked down CPP area - * @area: CPP area handle - * +/** * Releases a previously locked down CPP area. + * + * @param area + * CPP area handle */ void nfp_cpp_area_release(struct nfp_cpp_area *area) { - if (area->cpp->op->area_release) + if (area->cpp->op->area_release != NULL) area->cpp->op->area_release(area); } -/* - * nfp_cpp_area_iomem() - get IOMEM region for CPP area +/** + * Returns an iomem pointer for use with readl()/writel() style operations. * - * @area: CPP area handle + * @param area + * CPP area handle * - * Returns an iomem pointer for use with readl()/writel() style operations. + * @return + * Pointer to the area, or NULL * * NOTE: Area must have been locked down with an 'acquire'. - * - * Return: pointer to the area, or NULL */ void * nfp_cpp_area_iomem(struct nfp_cpp_area *area) { void *iomem = NULL; - if (area->cpp->op->area_iomem) + if (area->cpp->op->area_iomem != NULL) iomem = area->cpp->op->area_iomem(area); return iomem; } -/* - * nfp_cpp_area_read - read data from CPP area +/** + * Read data from indicated CPP region. 
* - * @area: CPP area handle - * @offset: offset into CPP area - * @kernel_vaddr: kernel address to put data into - * @length: number of bytes to read + * @param area + * CPP area handle + * @param offset + * Offset into CPP area + * @param address + * Address to put data into + * @param length + * Number of bytes to read * - * Read data from indicated CPP region. + * @return + * Length of io, or -ERRNO * * NOTE: @offset and @length must be 32-bit aligned values. - * * NOTE: Area must have been locked down with an 'acquire'. */ int -nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset, - void *kernel_vaddr, size_t length) +nfp_cpp_area_read(struct nfp_cpp_area *area, + uint32_t offset, + void *address, + size_t length) { if ((offset + length) > area->size) return -EFAULT; - return area->cpp->op->area_read(area, kernel_vaddr, offset, length); + return area->cpp->op->area_read(area, address, offset, length); } -/* - * nfp_cpp_area_write - write data to CPP area +/** + * Write data to indicated CPP region. * - * @area: CPP area handle - * @offset: offset into CPP area - * @kernel_vaddr: kernel address to read data from - * @length: number of bytes to write + * @param area + * CPP area handle + * @param offset + * Offset into CPP area + * @param address + * Address to put data into + * @param length + * Number of bytes to read * - * Write data to indicated CPP region. + * @return + * Length of io, or -ERRNO * * NOTE: @offset and @length must be 32-bit aligned values. - * * NOTE: Area must have been locked down with an 'acquire'. */ int -nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset, - const void *kernel_vaddr, size_t length) +nfp_cpp_area_write(struct nfp_cpp_area *area, + uint32_t offset, + const void *address, + size_t length) { if ((offset + length) > area->size) return -EFAULT; - return area->cpp->op->area_write(area, kernel_vaddr, offset, length); -} - -void * -nfp_cpp_area_mapped(struct nfp_cpp_area *area) -{ - if (area->cpp->op->area_mapped) - return area->cpp->op->area_mapped(area); - return NULL; -} - -/* - * nfp_cpp_area_check_range - check if address range fits in CPP area - * - * @area: CPP area handle - * @offset: offset into CPP area - * @length: size of address range in bytes - * - * Check if address range fits within CPP area. Return 0 if area fits - * or negative value on error. - */ -int -nfp_cpp_area_check_range(struct nfp_cpp_area *area, unsigned long long offset, - unsigned long length) -{ - if (((offset + length) > area->size)) - return -EFAULT; - - return 0; + return area->cpp->op->area_write(area, address, offset, length); } /* @@ -406,10 +576,11 @@ nfp_cpp_area_check_range(struct nfp_cpp_area *area, unsigned long long offset, * based upon NFP model. */ static uint32_t -nfp_xpb_to_cpp(struct nfp_cpp *cpp, uint32_t *xpb_addr) +nfp_xpb_to_cpp(struct nfp_cpp *cpp, + uint32_t *xpb_addr) { - uint32_t xpb; int island; + uint32_t xpb; xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0); @@ -417,132 +588,293 @@ nfp_xpb_to_cpp(struct nfp_cpp *cpp, uint32_t *xpb_addr) * Ensure that non-local XPB accesses go out through the * global XPBM bus. 
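Taken together, the reworked area calls above form a short lifecycle: allocate and acquire a window, do 32-bit aligned reads and writes inside it, then release and free it. A minimal sketch, not part of the patch, assuming a valid CPP id and a 32-bit aligned address (the example_ name is illustrative):

static int
example_area_copy(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr)
{
        int ret;
        uint32_t buf[2] = { 0 };   /* 8 bytes, 32-bit aligned */
        struct nfp_cpp_area *area;

        area = nfp_cpp_area_alloc_acquire(cpp, cpp_id, addr, sizeof(buf));
        if (area == NULL)
                return -EACCES;

        ret = nfp_cpp_area_read(area, 0, buf, sizeof(buf));
        if (ret == (int)sizeof(buf))
                ret = nfp_cpp_area_write(area, 0, buf, sizeof(buf));

        nfp_cpp_area_release_free(area);

        return ret < 0 ? ret : 0;
}
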
*/ - island = ((*xpb_addr) >> 24) & 0x3f; + island = (*xpb_addr >> 24) & 0x3f; if (island == 0) return xpb; - if (island == 1) { - /* - * Accesses to the ARM Island overlay uses Island 0 - * Global Bit - */ - (*xpb_addr) &= ~0x7f000000; - if (*xpb_addr < 0x60000) - *xpb_addr |= (1 << 30); - else - /* And only non-ARM interfaces use island id = 1 */ - if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp)) != - NFP_CPP_INTERFACE_TYPE_ARM) - *xpb_addr |= (1 << 24); + if (island != 1) { + *xpb_addr |= (1 << 30); + return xpb; + } + + /* + * Accesses to the ARM Island overlay uses Island 0 + * Global Bit + */ + *xpb_addr &= ~0x7f000000; + if (*xpb_addr < 0x60000) { + *xpb_addr |= (1 << 30); } else { - (*xpb_addr) |= (1 << 30); + /* And only non-ARM interfaces use island id = 1 */ + if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp)) != + NFP_CPP_INTERFACE_TYPE_ARM) + *xpb_addr |= (1 << 24); } return xpb; } +/** + * Read a uint32_t value from an area + * + * @param area + * CPP Area handle + * @param offset + * Offset into area + * @param value + * Pointer to read buffer + * + * @return + * 0 on success, or -ERRNO + */ int -nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset, - uint32_t *value) +nfp_cpp_area_readl(struct nfp_cpp_area *area, + uint32_t offset, + uint32_t *value) { int sz; uint32_t tmp = 0; sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + if (sz != sizeof(tmp)) + return sz < 0 ? sz : -EIO; + *value = rte_le_to_cpu_32(tmp); - return (sz == sizeof(*value)) ? 0 : -1; + return 0; } +/** + * Write a uint32_t vale to an area + * + * @param area + * CPP Area handle + * @param offset + * Offset into area + * @param value + * Value to write + * + * @return + * 0 on success, or -ERRNO + */ int -nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, - uint32_t value) +nfp_cpp_area_writel(struct nfp_cpp_area *area, + uint32_t offset, + uint32_t value) { int sz; value = rte_cpu_to_le_32(value); sz = nfp_cpp_area_write(area, offset, &value, sizeof(value)); - return (sz == sizeof(value)) ? 0 : -1; + if (sz != sizeof(value)) + return sz < 0 ? sz : -EIO; + + return 0; } +/** + * Read a uint64_t value from an area + * + * @param area + * CPP Area handle + * @param offset + * Offset into area + * @param value + * Pointer to read buffer + * + * @return + * 0 on success, or -ERRNO + */ int -nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, - uint64_t *value) +nfp_cpp_area_readq(struct nfp_cpp_area *area, + uint32_t offset, + uint64_t *value) { int sz; uint64_t tmp = 0; sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + if (sz != sizeof(tmp)) + return sz < 0 ? sz : -EIO; + *value = rte_le_to_cpu_64(tmp); - return (sz == sizeof(*value)) ? 0 : -1; + return 0; } +/** + * Write a uint64_t vale to an area + * + * @param area + * CPP Area handle + * @param offset + * Offset into area + * @param value + * Value to write + * + * @return + * 0 on success, or -ERRNO + */ int -nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, - uint64_t value) +nfp_cpp_area_writeq(struct nfp_cpp_area *area, + uint32_t offset, + uint64_t value) { int sz; value = rte_cpu_to_le_64(value); sz = nfp_cpp_area_write(area, offset, &value, sizeof(value)); + if (sz != sizeof(value)) + return sz < 0 ? sz : -EIO; - return (sz == sizeof(value)) ? 
0 : -1; + return 0; } +/** + * Read a uint32_t value from a CPP location + * + * @param cpp + * CPP device handle + * @param cpp_id + * CPP ID for operation + * @param address + * Address for operation + * @param value + * Pointer to read buffer + * + * @return + * 0 on success, or -ERRNO + */ int -nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, - uint32_t *value) +nfp_cpp_readl(struct nfp_cpp *cpp, + uint32_t cpp_id, + uint64_t address, + uint32_t *value) { int sz; uint32_t tmp; sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp)); + if (sz != sizeof(tmp)) + return sz < 0 ? sz : -EIO; + *value = rte_le_to_cpu_32(tmp); - return (sz == sizeof(*value)) ? 0 : -1; + return 0; } +/** + * Write a uint32_t value to a CPP location + * + * @param cpp + * CPP device handle + * @param cpp_id + * CPP ID for operation + * @param address + * Address for operation + * @param value + * Value to write + * + * @return + * 0 on success, or -ERRNO + */ int -nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, - uint32_t value) +nfp_cpp_writel(struct nfp_cpp *cpp, + uint32_t cpp_id, + uint64_t address, + uint32_t value) { int sz; value = rte_cpu_to_le_32(value); sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value)); + if (sz != sizeof(value)) + return sz < 0 ? sz : -EIO; - return (sz == sizeof(value)) ? 0 : -1; + return 0; } +/** + * Read a uint64_t value from a CPP location + * + * @param cpp + * CPP device handle + * @param cpp_id + * CPP ID for operation + * @param address + * Address for operation + * @param value + * Pointer to read buffer + * + * @return + * 0 on success, or -ERRNO + */ int -nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, - uint64_t *value) +nfp_cpp_readq(struct nfp_cpp *cpp, + uint32_t cpp_id, + uint64_t address, + uint64_t *value) { int sz; uint64_t tmp; sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp)); *value = rte_le_to_cpu_64(tmp); + if (sz != sizeof(tmp)) + return sz < 0 ? sz : -EIO; - return (sz == sizeof(*value)) ? 0 : -1; + return 0; } +/** + * Write a uint64_t value to a CPP location + * + * @param cpp + * CPP device handle + * @param cpp_id + * CPP ID for operation + * @param address + * Address for operation + * @param value + * Value to write + * + * @return + * 0 on success, or -ERRNO + */ int -nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, - uint64_t value) +nfp_cpp_writeq(struct nfp_cpp *cpp, + uint32_t cpp_id, + uint64_t address, + uint64_t value) { int sz; value = rte_cpu_to_le_64(value); sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value)); + if (sz != sizeof(value)) + return sz < 0 ? sz : -EIO; - return (sz == sizeof(value)) ? 
0 : -1; + return 0; } +/** + * Write a uint32_t word to a XPB location + * + * @param cpp + * CPP device handle + * @param xpb_addr + * XPB target and address + * @param value + * Value to write + * + * @return + * 0 on success, or -ERRNO + */ int -nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t value) +nfp_xpb_writel(struct nfp_cpp *cpp, + uint32_t xpb_addr, + uint32_t value) { uint32_t cpp_dest; @@ -551,8 +883,23 @@ nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t value) return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value); } +/** + * Read a uint32_t value from a XPB location + * + * @param cpp + * CPP device handle + * @param xpb_addr + * XPB target and address + * @param value + * Pointer to read buffer + * + * @return + * 0 on success, or -ERRNO + */ int -nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t *value) +nfp_xpb_readl(struct nfp_cpp *cpp, + uint32_t xpb_addr, + uint32_t *value) { uint32_t cpp_dest; @@ -562,14 +909,17 @@ nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t *value) } static struct nfp_cpp * -nfp_cpp_alloc(struct rte_pci_device *dev, int driver_lock_needed) +nfp_cpp_alloc(struct rte_pci_device *pci_dev, + void *priv, + bool driver_lock_needed) { - const struct nfp_cpp_operations *ops; - struct nfp_cpp *cpp; int err; + size_t target; + uint32_t xpb_addr; + struct nfp_cpp *cpp; + const struct nfp_cpp_operations *ops; ops = nfp_cpp_transport_operations(); - if (ops == NULL || ops->init == NULL) return NULL; @@ -578,32 +928,50 @@ nfp_cpp_alloc(struct rte_pci_device *dev, int driver_lock_needed) return NULL; cpp->op = ops; + cpp->priv = priv; cpp->driver_lock_needed = driver_lock_needed; - if (cpp->op->init) { - err = cpp->op->init(cpp, dev); + err = ops->get_interface(pci_dev, &cpp->interface); + if (err != 0) { + free(cpp); + return NULL; + } + + err = ops->get_serial(pci_dev, cpp->serial, NFP_SERIAL_LEN); + if (err != 0) { + free(cpp); + return NULL; + } + + /* + * NOTE: cpp_lock is NOT locked for op->init, + * since it may call NFP CPP API operations + */ + err = cpp->op->init(cpp); + if (err < 0) { + PMD_DRV_LOG(ERR, "NFP interface initialization failed"); + free(cpp); + return NULL; + } + + err = nfp_cpp_model_autodetect(cpp, &cpp->model); + if (err < 0) { + PMD_DRV_LOG(ERR, "NFP model detection failed"); + free(cpp); + return NULL; + } + + for (target = 0; target < RTE_DIM(cpp->imb_cat_table); target++) { + /* Hardcoded XPB IMB Base, island 0 */ + xpb_addr = 0x000a0000 + (target * 4); + err = nfp_xpb_readl(cpp, xpb_addr, &cpp->imb_cat_table[target]); if (err < 0) { + PMD_DRV_LOG(ERR, "Can't read CPP mapping from device"); free(cpp); return NULL; } } - if (NFP_CPP_MODEL_IS_6000(nfp_cpp_model(cpp))) { - uint32_t xpbaddr; - size_t tgt; - - for (tgt = 0; tgt < RTE_DIM(cpp->imb_cat_table); tgt++) { - /* Hardcoded XPB IMB Base, island 0 */ - xpbaddr = 0x000a0000 + (tgt * 4); - err = nfp_xpb_readl(cpp, xpbaddr, - (uint32_t *)&cpp->imb_cat_table[tgt]); - if (err < 0) { - free(cpp); - return NULL; - } - } - } - err = nfp_cpp_set_mu_locality_lsb(cpp); if (err < 0) { PMD_DRV_LOG(ERR, "Can't calculate MU locality bit offset"); @@ -614,253 +982,181 @@ nfp_cpp_alloc(struct rte_pci_device *dev, int driver_lock_needed) return cpp; } -/* - * nfp_cpp_free - free the CPP handle - * @cpp: CPP handle +/** + * Free the CPP handle + * + * @param cpp + * CPP handle */ void nfp_cpp_free(struct nfp_cpp *cpp) { - if (cpp->op && cpp->op->free) + if (cpp->op != NULL && cpp->op->free != NULL) cpp->op->free(cpp); - if (cpp->serial_len) 
- free(cpp->serial); - free(cpp); } -struct nfp_cpp * -nfp_cpp_from_device_name(struct rte_pci_device *dev, int driver_lock_needed) -{ - return nfp_cpp_alloc(dev, driver_lock_needed); -} - -/* - * Modify bits of a 32-bit value from the XPB bus +/** + * Create a NFP CPP handle from device + * + * @param dev + * PCI device + * @param priv + * Private data of low-level implementation + * @param driver_lock_needed + * Driver lock flag * - * @param cpp NFP CPP device handle - * @param xpb_tgt XPB target and address - * @param mask mask of bits to alter - * @param value value to modify + * @return + * NFP CPP handle on success, NULL on failure * - * @return 0 on success, or -1 on failure. + * NOTE: On failure, cpp_ops->free will be called! */ -int -nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, - uint32_t value) +struct nfp_cpp * +nfp_cpp_from_device_name(struct rte_pci_device *dev, + void *priv, + bool driver_lock_needed) { - int err; - uint32_t tmp; - - err = nfp_xpb_readl(cpp, xpb_tgt, &tmp); - if (err < 0) - return err; - - tmp &= ~mask; - tmp |= (mask & value); - return nfp_xpb_writel(cpp, xpb_tgt, tmp); + return nfp_cpp_alloc(dev, priv, driver_lock_needed); } -/* - * Modify bits of a 32-bit value from the XPB bus +/** + * Read from CPP target * - * @param cpp NFP CPP device handle - * @param xpb_tgt XPB target and address - * @param mask mask of bits to alter - * @param value value to monitor for - * @param timeout_us maximum number of us to wait (-1 for forever) + * @param cpp + * CPP handle + * @param destination + * CPP id + * @param offset + * Offset into CPP target + * @param address + * Buffer for result + * @param length + * Number of bytes to read * - * @return >= 0 on success, or negative value on failure. + * @return + * Length of io, or -ERRNO */ int -nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, - uint32_t value, int timeout_us) +nfp_cpp_read(struct nfp_cpp *cpp, + uint32_t destination, + uint64_t offset, + void *address, + size_t length) { - uint32_t tmp; int err; - - do { - err = nfp_xpb_readl(cpp, xpb_tgt, &tmp); - if (err < 0) - goto exit; - - if ((tmp & mask) == (value & mask)) { - if (timeout_us < 0) - timeout_us = 0; - break; - } - - if (timeout_us < 0) - continue; - - timeout_us -= 100; - usleep(100); - } while (timeout_us >= 0); - - if (timeout_us < 0) - err = -ETIMEDOUT; - else - err = timeout_us; - -exit: - return err; -} - -/* - * nfp_cpp_read - read from CPP target - * @cpp: CPP handle - * @destination: CPP id - * @address: offset into CPP target - * @kernel_vaddr: kernel buffer for result - * @length: number of bytes to read - */ -int -nfp_cpp_read(struct nfp_cpp *cpp, uint32_t destination, - unsigned long long address, void *kernel_vaddr, size_t length) -{ struct nfp_cpp_area *area; - int err; - area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length); + area = nfp_cpp_area_alloc_acquire(cpp, destination, offset, length); if (area == NULL) { - PMD_DRV_LOG(ERR, "Area allocation/acquire failed"); - return -1; + PMD_DRV_LOG(ERR, "Area allocation/acquire failed for read"); + return -EACCES; } - err = nfp_cpp_area_read(area, 0, kernel_vaddr, length); + err = nfp_cpp_area_read(area, 0, address, length); nfp_cpp_area_release_free(area); return err; } -/* - * nfp_cpp_write - write to CPP target - * @cpp: CPP handle - * @destination: CPP id - * @address: offset into CPP target - * @kernel_vaddr: kernel buffer to read from - * @length: number of bytes to write +/** + * Write to CPP target + * + * @param cpp + * CPP 
handle + * @param destination + * CPP id + * @param offset + * Offset into CPP target + * @param address + * Buffer to read from + * @param length + * Number of bytes to write + * + * @return + * Length of io, or -ERRNO */ int -nfp_cpp_write(struct nfp_cpp *cpp, uint32_t destination, - unsigned long long address, const void *kernel_vaddr, - size_t length) +nfp_cpp_write(struct nfp_cpp *cpp, + uint32_t destination, + uint64_t offset, + const void *address, + size_t length) { - struct nfp_cpp_area *area; int err; + struct nfp_cpp_area *area; - area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length); - if (area == NULL) - return -1; + area = nfp_cpp_area_alloc_acquire(cpp, destination, offset, length); + if (area == NULL) { + PMD_DRV_LOG(ERR, "Area allocation/acquire failed for write"); + return -EACCES; + } - err = nfp_cpp_area_write(area, 0, kernel_vaddr, length); + err = nfp_cpp_area_write(area, 0, address, length); nfp_cpp_area_release_free(area); return err; } -/* - * nfp_cpp_area_fill - fill a CPP area with a value - * @area: CPP area - * @offset: offset into CPP area - * @value: value to fill with - * @length: length of area to fill - */ -int -nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, - uint32_t value, size_t length) -{ - int err; - size_t i; - uint64_t value64; - - value = rte_cpu_to_le_32(value); - value64 = ((uint64_t)value << 32) | value; - - if ((offset + length) > area->size) - return -EINVAL; - - if ((area->offset + offset) & 3) - return -EINVAL; - - if (((area->offset + offset) & 7) == 4 && length >= 4) { - err = nfp_cpp_area_write(area, offset, &value, sizeof(value)); - if (err < 0) - return err; - if (err != sizeof(value)) - return -ENOSPC; - offset += sizeof(value); - length -= sizeof(value); - } - - for (i = 0; (i + sizeof(value)) < length; i += sizeof(value64)) { - err = - nfp_cpp_area_write(area, offset + i, &value64, - sizeof(value64)); - if (err < 0) - return err; - if (err != sizeof(value64)) - return -ENOSPC; - } - - if ((i + sizeof(value)) <= length) { - err = - nfp_cpp_area_write(area, offset + i, &value, sizeof(value)); - if (err < 0) - return err; - if (err != sizeof(value)) - return -ENOSPC; - i += sizeof(value); - } - - return (int)i; -} - /* * NOTE: This code should not use nfp_xpb_* functions, * as those are model-specific */ uint32_t -__nfp_cpp_model_autodetect(struct nfp_cpp *cpp, uint32_t *model) +nfp_cpp_model_autodetect(struct nfp_cpp *cpp, + uint32_t *model) { - uint32_t reg; int err; + uint32_t reg; err = nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + NFP_PL_DEVICE_ID, - ®); + ®); if (err < 0) return err; *model = reg & NFP_PL_DEVICE_MODEL_MASK; - if (*model & NFP_PL_DEVICE_ID_MASK) - *model -= 0x10; + /* Disambiguate the NFP4000/NFP5000/NFP6000 chips */ + if (FIELD_GET(NFP_PL_DEVICE_PART_MASK, reg) == + NFP_PL_DEVICE_PART_NFP6000) { + if ((*model & NFP_PL_DEVICE_ID_MASK) != 0) + *model -= 0x10; + } return 0; } -/* - * nfp_cpp_map_area() - Helper function to map an area - * @cpp: NFP CPP handler - * @cpp_id: CPP ID - * @addr: CPP address - * @size: Size of the area - * @area: Area handle (output) +/** + * Map an area of IOMEM access. + * To undo the effect of this function call @nfp_cpp_area_release_free(*area). * - * Map an area of IOMEM access. To undo the effect of this function call - * @nfp_cpp_area_release_free(*area). 
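For one-off transfers, nfp_cpp_read() and nfp_cpp_write() above wrap that whole area alloc/acquire/release cycle internally. A minimal sketch, not part of the patch, with a placeholder CPP id and address:

static int
example_cpp_rw(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr)
{
        int ret;
        uint64_t buf[4] = { 0 };

        ret = nfp_cpp_read(cpp, cpp_id, addr, buf, sizeof(buf));
        if (ret != (int)sizeof(buf))
                return ret < 0 ? ret : -EIO;

        ret = nfp_cpp_write(cpp, cpp_id, addr, buf, sizeof(buf));

        return ret < 0 ? ret : 0;
}
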
+ * @param cpp + * NFP CPP handler + * @param cpp_id + * CPP id + * @param addr + * CPP address + * @param size + * Size of the area + * @param area + * Area handle (output) * - * Return: Pointer to memory mapped area or NULL + * @return + * Pointer to memory mapped area or NULL */ uint8_t * -nfp_cpp_map_area(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr, - unsigned long size, struct nfp_cpp_area **area) +nfp_cpp_map_area(struct nfp_cpp *cpp, + uint32_t cpp_id, + uint64_t addr, + uint32_t size, + struct nfp_cpp_area **area) { uint8_t *res; *area = nfp_cpp_area_alloc_acquire(cpp, cpp_id, addr, size); - if (*area == NULL) + if (*area == NULL) { + PMD_DRV_LOG(ERR, "Area allocation/acquire failed for map"); goto err_eio; + } res = nfp_cpp_area_iomem(*area); if (res == NULL) diff --git a/drivers/net/nfp/nfpcore/nfp_crc.c b/drivers/net/nfp/nfpcore/nfp_crc.c index 20431bf845f..986c52711da 100644 --- a/drivers/net/nfp/nfpcore/nfp_crc.c +++ b/drivers/net/nfp/nfpcore/nfp_crc.c @@ -9,27 +9,33 @@ #include "nfp_crc.h" static inline uint32_t -nfp_crc32_be_generic(uint32_t crc, unsigned char const *p, size_t len, - uint32_t polynomial) +nfp_crc32_be_generic(uint32_t crc, + unsigned char const *p, + size_t len, + uint32_t polynomial) { - int i; + uint32_t i; + while (len--) { crc ^= *p++ << 24; for (i = 0; i < 8; i++) - crc = (crc << 1) ^ ((crc & 0x80000000) ? polynomial : - 0); + crc = (crc << 1) ^ ((crc & 0x80000000) ? polynomial : 0); } + return crc; } static inline uint32_t -nfp_crc32_be(uint32_t crc, unsigned char const *p, size_t len) +nfp_crc32_be(uint32_t crc, + unsigned char const *p, + size_t len) { return nfp_crc32_be_generic(crc, p, len, CRCPOLY_BE); } static uint32_t -nfp_crc32_posix_end(uint32_t crc, size_t total_len) +nfp_crc32_posix_end(uint32_t crc, + size_t total_len) { /* Extend with the length of the string. */ while (total_len != 0) { @@ -43,7 +49,8 @@ nfp_crc32_posix_end(uint32_t crc, size_t total_len) } uint32_t -nfp_crc32_posix(const void *buff, size_t len) +nfp_crc32_posix(const void *buff, + size_t len) { return nfp_crc32_posix_end(nfp_crc32_be(0, buff, len), len); } diff --git a/drivers/net/nfp/nfpcore/nfp_dev.c b/drivers/net/nfp/nfpcore/nfp_dev.c new file mode 100644 index 00000000000..7799fa699a4 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_dev.c @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Corigine, Inc. + * All rights reserved. + */ + +#include "nfp_dev.h" + +#include + +#include "nfp_platform.h" + +/* + * Note: The value of 'max_qc_size' is different from kernel driver, + * because DPDK use 'uint16_t' as the data type. 
+ */ +const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = { + [NFP_DEV_NFP3800] = { + .qc_idx_mask = GENMASK(8, 0), + .qc_addr_offset = 0x400000, + .min_qc_size = 512, + .max_qc_size = RTE_BIT32(15), /**< 32K */ + + .chip_names = "NFP3800", + .pcie_cfg_expbar_offset = 0x0a00, + .qc_area_sz = 0x100000, + .pf_num_per_unit = 4, + }, + [NFP_DEV_NFP3800_VF] = { + .qc_idx_mask = GENMASK(8, 0), + .qc_addr_offset = 0, + .min_qc_size = 512, + .max_qc_size = RTE_BIT32(15), /**< 32K */ + }, + [NFP_DEV_NFP6000] = { + .qc_idx_mask = GENMASK(7, 0), + .qc_addr_offset = 0x80000, + .min_qc_size = 256, + .max_qc_size = RTE_BIT32(15), /**< 32K */ + + .chip_names = "NFP4000/NFP6000", + .pcie_cfg_expbar_offset = 0x0400, + .qc_area_sz = 0x80000, + .pf_num_per_unit = 1, + }, + [NFP_DEV_NFP6000_VF] = { + .qc_idx_mask = GENMASK(7, 0), + .qc_addr_offset = 0, + .min_qc_size = 256, + .max_qc_size = RTE_BIT32(15), /**< 32K */ + }, +}; + +const struct nfp_dev_info * +nfp_dev_info_get(uint16_t device_id) +{ + enum nfp_dev_id id; + + switch (device_id) { + case PCI_DEVICE_ID_NFP3800_PF_NIC: + id = NFP_DEV_NFP3800; + break; + case PCI_DEVICE_ID_NFP3800_VF_NIC: + id = NFP_DEV_NFP3800_VF; + break; + case PCI_DEVICE_ID_NFP4000_PF_NIC: + case PCI_DEVICE_ID_NFP6000_PF_NIC: + id = NFP_DEV_NFP6000; + break; + case PCI_DEVICE_ID_NFP6000_VF_NIC: + id = NFP_DEV_NFP6000_VF; + break; + default: + id = NFP_DEV_CNT; + break; + } + + if (id >= NFP_DEV_CNT) + return NULL; + + return &nfp_dev_info[id]; +} diff --git a/drivers/net/nfp/nfpcore/nfp_dev.h b/drivers/net/nfp/nfpcore/nfp_dev.h new file mode 100644 index 00000000000..b0fffff6198 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_dev.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Corigine, Inc. + * All rights reserved. + */ + +#ifndef __NFP_DEV_H__ +#define __NFP_DEV_H__ + +#include + +#define PCI_VENDOR_ID_NETRONOME 0x19ee +#define PCI_VENDOR_ID_CORIGINE 0x1da8 + +#define PCI_DEVICE_ID_NFP3800_PF_NIC 0x3800 +#define PCI_DEVICE_ID_NFP3800_VF_NIC 0x3803 +#define PCI_DEVICE_ID_NFP4000_PF_NIC 0x4000 +#define PCI_DEVICE_ID_NFP6000_PF_NIC 0x6000 +#define PCI_DEVICE_ID_NFP6000_VF_NIC 0x6003 /* Include NFP4000VF */ + +enum nfp_dev_id { + NFP_DEV_NFP3800, + NFP_DEV_NFP3800_VF, + NFP_DEV_NFP6000, + NFP_DEV_NFP6000_VF, + NFP_DEV_CNT, +}; + +struct nfp_dev_info { + /* Required fields */ + uint32_t qc_idx_mask; + uint32_t qc_addr_offset; + uint32_t min_qc_size; + uint32_t max_qc_size; + + /* PF-only fields */ + const char *chip_names; + uint32_t pcie_cfg_expbar_offset; + uint32_t qc_area_sz; + uint8_t pf_num_per_unit; +}; + +const struct nfp_dev_info *nfp_dev_info_get(uint16_t device_id); + +#endif /* __NFP_DEV_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/nfp/nfpcore/nfp_hwinfo.c index 9054bb03155..c334202bd77 100644 --- a/drivers/net/nfp/nfpcore/nfp_hwinfo.c +++ b/drivers/net/nfp/nfpcore/nfp_hwinfo.c @@ -3,7 +3,8 @@ * All rights reserved. */ -/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM +/* + * Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM * after chip reset. * * Examples of the fields: @@ -16,29 +17,97 @@ * (ie, in this example, ME 39 has been reserved by boardconfig.) 
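The nfp_dev_info_get() helper added in nfp_dev.c above translates a PCI device id into the per-chip constants table. A minimal probe-path sketch, not part of the patch (the error code and example_ name are illustrative):

static int
example_dev_probe(uint16_t pci_device_id)
{
        const struct nfp_dev_info *dev_info;

        dev_info = nfp_dev_info_get(pci_device_id);
        if (dev_info == NULL)
                return -ENODEV;   /* Unsupported or unknown device id */

        /* Per-chip constants are now available, e.g. dev_info->qc_addr_offset. */
        return 0;
}
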
*/ -#include -#include +#include "nfp_hwinfo.h" -#include "nfp_cpp.h" +#include "nfp_crc.h" #include "nfp_logs.h" -#include "nfp6000/nfp6000.h" #include "nfp_resource.h" -#include "nfp_hwinfo.h" -#include "nfp_crc.h" -static int +#define HWINFO_SIZE_MIN 0x100 + +/* + * The Hardware Info Table defines the properties of the system. + * + * HWInfo v1 Table (fixed size) + * + * 0x0000: uint32_t version Hardware Info Table version (1.0) + * 0x0004: uint32_t size Total size of the table, including the + * CRC32 (IEEE 802.3) + * 0x0008: uint32_t jumptab Offset of key/value table + * 0x000c: uint32_t keys Total number of keys in the key/value table + * NNNNNN: Key/value jump table and string data + * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * HWInfo v2 Table (variable size) + * + * 0x0000: uint32_t version Hardware Info Table version (2.0) + * 0x0004: uint32_t size Current size of the data area, excluding CRC32 + * 0x0008: uint32_t limit Maximum size of the table + * 0x000c: uint32_t reserved Unused, set to zero + * NNNNNN: Key/value data + * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * If the HWInfo table is in the process of being updated, the low bit of + * version will be set. + * + * HWInfo v1 Key/Value Table + * ------------------------- + * + * The key/value table is a set of offsets to ASCIIZ strings which have + * been strcmp(3) sorted (yes, please use bsearch(3) on the table). + * + * All keys are guaranteed to be unique. + * + * N+0: uint32_t key_1 Offset to the first key + * N+4: uint32_t val_1 Offset to the first value + * N+8: uint32_t key_2 Offset to the second key + * N+c: uint32_t val_2 Offset to the second value + * ... + * + * HWInfo v2 Key/Value Table + * ------------------------- + * + * Packed UTF8Z strings, ie 'key1\000value1\000key2\000value2\000' + * Unsorted. + * + * Note: Only the HwInfo v2 Table be supported now. 
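The packed HWInfo v2 key/value layout described above ('key1\0value1\0key2\0value2\0...', unsorted) is what nfp_hwinfo_db_walk() and nfp_hwinfo_lookup() traverse below. A standalone sketch of the same traversal over a byte buffer, not part of the patch, assuming <string.h> is available:

static const char *
example_kv_find(const char *data, const char *end, const char *lookup)
{
        const char *key;
        const char *val;

        for (key = data; *key != 0 && key < end; key = val + strlen(val) + 1) {
                val = key + strlen(key) + 1;
                if (val >= end)
                        return NULL;   /* Truncated table */

                if (strcmp(key, lookup) == 0)
                        return val;
        }

        return NULL;
}
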
+ */ + +#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_UPDATING RTE_BIT32(0) + +struct nfp_hwinfo { + uint8_t start[0]; + + uint32_t version; + uint32_t size; + + /* V2 specific fields */ + uint32_t limit; + uint32_t resv; + + char data[]; +}; + +static bool nfp_hwinfo_is_updating(struct nfp_hwinfo *hwinfo) { return hwinfo->version & NFP_HWINFO_VERSION_UPDATING; } static int -nfp_hwinfo_db_walk(struct nfp_hwinfo *hwinfo, uint32_t size) +nfp_hwinfo_db_walk(struct nfp_hwinfo *hwinfo, + uint32_t size) { - const char *key, *val, *end = hwinfo->data + size; + const char *key; + const char *val; + const char *end = hwinfo->data + size; - for (key = hwinfo->data; *key && key < end; - key = val + strlen(val) + 1) { + for (key = hwinfo->data; *key != 0 && key < end; + key = val + strlen(val) + 1) { val = key + strlen(key) + 1; if (val >= end) { PMD_DRV_LOG(ERR, "Bad HWINFO - overflowing value"); @@ -50,13 +119,17 @@ nfp_hwinfo_db_walk(struct nfp_hwinfo *hwinfo, uint32_t size) return -EINVAL; } } + return 0; } static int -nfp_hwinfo_db_validate(struct nfp_hwinfo *db, uint32_t len) +nfp_hwinfo_db_validate(struct nfp_hwinfo *db, + uint32_t len) { - uint32_t size, new_crc, *crc; + uint32_t *crc; + uint32_t size; + uint32_t new_crc; size = db->size; if (size > len) { @@ -68,8 +141,8 @@ nfp_hwinfo_db_validate(struct nfp_hwinfo *db, uint32_t len) new_crc = nfp_crc32_posix((char *)db, size); crc = (uint32_t *)(db->start + size); if (new_crc != *crc) { - PMD_DRV_LOG(ERR, "Corrupt hwinfo table (CRC mismatch) calculated 0x%x, expected 0x%x", - new_crc, *crc); + PMD_DRV_LOG(ERR, "CRC mismatch, calculated %#x, expected %#x", + new_crc, *crc); return -EINVAL; } @@ -77,75 +150,79 @@ nfp_hwinfo_db_validate(struct nfp_hwinfo *db, uint32_t len) } static struct nfp_hwinfo * -nfp_hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size) +nfp_hwinfo_try_fetch(struct nfp_cpp *cpp, + size_t *cpp_size) { - struct nfp_hwinfo *header; - void *res; - uint64_t cpp_addr; - uint32_t cpp_id; int err; + void *res; uint8_t *db; + uint32_t cpp_id; + uint64_t cpp_addr; + struct nfp_hwinfo *header; res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO); - if (res) { - cpp_id = nfp_resource_cpp_id(res); - cpp_addr = nfp_resource_address(res); - *cpp_size = nfp_resource_size(res); + if (res == NULL) { + PMD_DRV_LOG(ERR, "HWInfo - acquire resource failed"); + return NULL; + } - nfp_resource_release(res); + cpp_id = nfp_resource_cpp_id(res); + cpp_addr = nfp_resource_address(res); + *cpp_size = nfp_resource_size(res); - if (*cpp_size < HWINFO_SIZE_MIN) - return NULL; - } else { + nfp_resource_release(res); + + if (*cpp_size < HWINFO_SIZE_MIN) return NULL; - } db = malloc(*cpp_size + 1); if (db == NULL) return NULL; err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size); - if (err != (int)*cpp_size) + if (err != (int)*cpp_size) { + PMD_DRV_LOG(ERR, "HWInfo - CPP read error %d", err); goto exit_free; + } - header = (void *)db; - PMD_DRV_LOG(DEBUG, "NFP HWINFO header: %#08x", *(uint32_t *)header); + header = (struct nfp_hwinfo *)db; if (nfp_hwinfo_is_updating(header)) goto exit_free; if (header->version != NFP_HWINFO_VERSION_2) { - PMD_DRV_LOG(DEBUG, "Unknown HWInfo version: 0x%08x", - header->version); + PMD_DRV_LOG(ERR, "Unknown HWInfo version: %#08x", + header->version); goto exit_free; } /* NULL-terminate for safety */ db[*cpp_size] = '\0'; - return (void *)db; + return (struct nfp_hwinfo *)db; + 
exit_free: free(db); return NULL; } static struct nfp_hwinfo * -nfp_hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size) +nfp_hwinfo_fetch(struct nfp_cpp *cpp, + size_t *hwdb_size) { + int count = 0; struct timespec wait; struct nfp_hwinfo *db; - int count; wait.tv_sec = 0; - wait.tv_nsec = 10000000; - count = 0; + wait.tv_nsec = 10000000; /* 10ms */ for (;;) { db = nfp_hwinfo_try_fetch(cpp, hwdb_size); - if (db) + if (db != NULL) return db; nanosleep(&wait, NULL); - if (count++ > 200) { + if (count++ > 200) { /* 10ms * 200 = 2s */ PMD_DRV_LOG(ERR, "NFP access error"); return NULL; } @@ -155,41 +232,49 @@ nfp_hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size) struct nfp_hwinfo * nfp_hwinfo_read(struct nfp_cpp *cpp) { - struct nfp_hwinfo *db; - size_t hwdb_size = 0; int err; + size_t hwdb_size = 0; + struct nfp_hwinfo *db; db = nfp_hwinfo_fetch(cpp, &hwdb_size); if (db == NULL) return NULL; err = nfp_hwinfo_db_validate(db, hwdb_size); - if (err) { + if (err != 0) { free(db); return NULL; } + return db; } -/* - * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name - * @hwinfo: NFP HWinfo table - * @lookup: HWInfo name to search for +/** + * Find a value in the HWInfo table by name + * + * @param hwinfo + * NFP HWInfo table + * @param lookup + * HWInfo name to search for * - * Return: Value of the HWInfo name, or NULL + * @return + * Value of the HWInfo name, or NULL */ const char * -nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup) +nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, + const char *lookup) { - const char *key, *val, *end; + const char *key; + const char *val; + const char *end; if (hwinfo == NULL || lookup == NULL) return NULL; end = hwinfo->data + hwinfo->size - sizeof(uint32_t); - for (key = hwinfo->data; *key && key < end; - key = val + strlen(val) + 1) { + for (key = hwinfo->data; *key != 0 && key < end; + key = val + strlen(val) + 1) { val = key + strlen(key) + 1; if (strcmp(key, lookup) == 0) diff --git a/drivers/net/nfp/nfpcore/nfp_hwinfo.h b/drivers/net/nfp/nfpcore/nfp_hwinfo.h index a3da7512dbe..c812f100764 100644 --- a/drivers/net/nfp/nfpcore/nfp_hwinfo.h +++ b/drivers/net/nfp/nfpcore/nfp_hwinfo.h @@ -6,80 +6,12 @@ #ifndef __NFP_HWINFO_H__ #define __NFP_HWINFO_H__ -#include +#include "nfp_cpp.h" -#define HWINFO_SIZE_MIN 0x100 - -/* - * The Hardware Info Table defines the properties of the system. - * - * HWInfo v1 Table (fixed size) - * - * 0x0000: uint32_t version Hardware Info Table version (1.0) - * 0x0004: uint32_t size Total size of the table, including the - * CRC32 (IEEE 802.3) - * 0x0008: uint32_t jumptab Offset of key/value table - * 0x000c: uint32_t keys Total number of keys in the key/value - * table - * NNNNNN: Key/value jump table and string data - * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) - * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE - * - * HWInfo v2 Table (variable size) - * - * 0x0000: uint32_t version Hardware Info Table version (2.0) - * 0x0004: uint32_t size Current size of the data area, excluding - * CRC32 - * 0x0008: uint32_t limit Maximum size of the table - * 0x000c: uint32_t reserved Unused, set to zero - * NNNNNN: Key/value data - * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) - * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE - * - * If the HWInfo table is in the process of being updated, the low bit of - * version will be set. 
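With nfp_hwinfo_read() and nfp_hwinfo_lookup() in place, querying the table is a two-step affair. A minimal sketch, not part of the patch; the key string is a placeholder, and the caller freeing the malloc'ed table is an assumption consistent with the error paths above:

static int
example_hwinfo_query(struct nfp_cpp *cpp, const char *key,
                char *out, size_t out_len)
{
        const char *value;
        struct nfp_hwinfo *hwinfo;

        hwinfo = nfp_hwinfo_read(cpp);   /* Fetch and CRC-validate the table */
        if (hwinfo == NULL)
                return -EIO;

        value = nfp_hwinfo_lookup(hwinfo, key);   /* NULL if the key is absent */
        if (value != NULL)
                snprintf(out, out_len, "%s", value);

        free(hwinfo);

        return value != NULL ? 0 : -ENOENT;
}
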
- * - * HWInfo v1 Key/Value Table - * ------------------------- - * - * The key/value table is a set of offsets to ASCIIZ strings which have - * been strcmp(3) sorted (yes, please use bsearch(3) on the table). - * - * All keys are guaranteed to be unique. - * - * N+0: uint32_t key_1 Offset to the first key - * N+4: uint32_t val_1 Offset to the first value - * N+8: uint32_t key_2 Offset to the second key - * N+c: uint32_t val_2 Offset to the second value - * ... - * - * HWInfo v2 Key/Value Table - * ------------------------- - * - * Packed UTF8Z strings, ie 'key1\000value1\000key2\000value2\000' - * - * Unsorted. - */ - -#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0) -#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0) -#define NFP_HWINFO_VERSION_UPDATING RTE_BIT32(0) - -struct nfp_hwinfo { - uint8_t start[0]; - - uint32_t version; - uint32_t size; - - /* v2 specific fields */ - uint32_t limit; - uint32_t resv; - - char data[]; -}; +struct nfp_hwinfo; struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp); const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup); -#endif +#endif /* __NFP_HWINFO_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp_mip.c b/drivers/net/nfp/nfpcore/nfp_mip.c index 6b392ad5eb9..d5ada3687ad 100644 --- a/drivers/net/nfp/nfpcore/nfp_mip.c +++ b/drivers/net/nfp/nfpcore/nfp_mip.c @@ -3,17 +3,16 @@ * All rights reserved. */ -#include +#include "nfp_mip.h" + #include -#include "nfp_cpp.h" #include "nfp_logs.h" -#include "nfp_mip.h" #include "nfp_nffw.h" -#define NFP_MIP_SIGNATURE rte_cpu_to_le_32(0x0050494d) /* "MIP\0" */ -#define NFP_MIP_VERSION rte_cpu_to_le_32(1) -#define NFP_MIP_MAX_OFFSET (256 * 1024) +#define NFP_MIP_SIGNATURE rte_cpu_to_le_32(0x0050494d) /* "MIP\0" */ +#define NFP_MIP_VERSION rte_cpu_to_le_32(1) +#define NFP_MIP_MAX_OFFSET (256 * 1024) struct nfp_mip { uint32_t signature; @@ -37,24 +36,28 @@ struct nfp_mip { /* Read memory and check if it could be a valid MIP */ static int -nfp_mip_try_read(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr, - struct nfp_mip *mip) +nfp_mip_try_read(struct nfp_cpp *cpp, + uint32_t cpp_id, + uint64_t addr, + struct nfp_mip *mip) { int ret; ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip)); if (ret != sizeof(*mip)) { - PMD_DRV_LOG(ERR, "Failed to read MIP data (%d, %zu)", ret, sizeof(*mip)); + PMD_DRV_LOG(ERR, "Failed to read MIP data"); return -EIO; } + if (mip->signature != NFP_MIP_SIGNATURE) { - PMD_DRV_LOG(ERR, "Incorrect MIP signature (0x%08x)", - rte_le_to_cpu_32(mip->signature)); + PMD_DRV_LOG(ERR, "Incorrect MIP signature %#08x", + rte_le_to_cpu_32(mip->signature)); return -EINVAL; } + if (mip->mip_version != NFP_MIP_VERSION) { - PMD_DRV_LOG(ERR, "Unsupported MIP version (%d)", - rte_le_to_cpu_32(mip->mip_version)); + PMD_DRV_LOG(ERR, "Unsupported MIP version %d", + rte_le_to_cpu_32(mip->mip_version)); return -EINVAL; } @@ -63,49 +66,53 @@ nfp_mip_try_read(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr, /* Try to locate MIP using the resource table */ static int -nfp_mip_read_resource(struct nfp_cpp *cpp, struct nfp_mip *mip) +nfp_mip_read_resource(struct nfp_cpp *cpp, + struct nfp_mip *mip) { - struct nfp_nffw_info *nffw_info; - uint32_t cpp_id; - uint64_t addr; int err; + uint64_t addr; + uint32_t cpp_id; + struct nfp_nffw_info *nffw_info; nffw_info = nfp_nffw_info_open(cpp); if (nffw_info == NULL) return -ENODEV; err = nfp_nffw_info_mip_first(nffw_info, &cpp_id, &addr); - if (err) + if (err != 0) goto exit_close_nffw; err = 
nfp_mip_try_read(cpp, cpp_id, addr, mip); + exit_close_nffw: nfp_nffw_info_close(nffw_info); return err; } -/* - * nfp_mip_open() - Get device MIP structure - * @cpp: NFP CPP Handle - * - * Copy MIP structure from NFP device and return it. The returned +/** + * Copy MIP structure from NFP device and return it. The returned * structure is handled internally by the library and should be - * freed by calling nfp_mip_close(). + * freed by calling @nfp_mip_close(). + * + * @param cpp + * NFP CPP Handle * - * Return: pointer to mip, NULL on failure. + * @return + * Pointer to MIP, NULL on failure. */ struct nfp_mip * nfp_mip_open(struct nfp_cpp *cpp) { - struct nfp_mip *mip; int err; + struct nfp_mip *mip; mip = malloc(sizeof(*mip)); if (mip == NULL) return NULL; err = nfp_mip_read_resource(cpp, mip); - if (err) { + if (err != 0) { + PMD_DRV_LOG(ERR, "Failed to read MIP resource"); free(mip); return NULL; } @@ -127,27 +134,39 @@ nfp_mip_name(const struct nfp_mip *mip) return mip->name; } -/* - * nfp_mip_symtab() - Get the address and size of the MIP symbol table - * @mip: MIP handle - * @addr: Location for NFP DDR address of MIP symbol table - * @size: Location for size of MIP symbol table +/** + * Get the address and size of the MIP symbol table. + * + * @param mip + * MIP handle + * @param addr + * Location for NFP DDR address of MIP symbol table + * @param size + * Location for size of MIP symbol table */ void -nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size) +nfp_mip_symtab(const struct nfp_mip *mip, + uint32_t *addr, + uint32_t *size) { *addr = rte_le_to_cpu_32(mip->symtab_addr); *size = rte_le_to_cpu_32(mip->symtab_size); } -/* - * nfp_mip_strtab() - Get the address and size of the MIP symbol name table - * @mip: MIP handle - * @addr: Location for NFP DDR address of MIP symbol name table - * @size: Location for size of MIP symbol name table +/** + * Get the address and size of the MIP symbol name table. + * + * @param mip + * MIP handle + * @param addr + * Location for NFP DDR address of MIP symbol name table + * @param size + * Location for size of MIP symbol name table */ void -nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size) +nfp_mip_strtab(const struct nfp_mip *mip, + uint32_t *addr, + uint32_t *size) { *addr = rte_le_to_cpu_32(mip->strtab_addr); *size = rte_le_to_cpu_32(mip->strtab_size); diff --git a/drivers/net/nfp/nfpcore/nfp_mip.h b/drivers/net/nfp/nfpcore/nfp_mip.h index d0919b58fe4..dbd9af31ed3 100644 --- a/drivers/net/nfp/nfpcore/nfp_mip.h +++ b/drivers/net/nfp/nfpcore/nfp_mip.h @@ -6,7 +6,7 @@ #ifndef __NFP_MIP_H__ #define __NFP_MIP_H__ -#include "nfp_nffw.h" +#include "nfp_cpp.h" struct nfp_mip; @@ -16,6 +16,5 @@ void nfp_mip_close(struct nfp_mip *mip); const char *nfp_mip_name(const struct nfp_mip *mip); void nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size); void nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size); -int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id, - uint64_t *off); -#endif + +#endif /* __NFP_MIP_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp_mutex.c b/drivers/net/nfp/nfpcore/nfp_mutex.c index f967a29351d..3c10c7a090c 100644 --- a/drivers/net/nfp/nfpcore/nfp_mutex.c +++ b/drivers/net/nfp/nfpcore/nfp_mutex.c @@ -3,20 +3,12 @@ * All rights reserved. 
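The MIP accessors above are typically used back to back: open the MIP, read out the symbol table location, then close it. A minimal sketch, not part of the patch, using only functions from this file (the example_ name is illustrative):

static int
example_mip_symtab(struct nfp_cpp *cpp)
{
        uint32_t addr;
        uint32_t size;
        struct nfp_mip *mip;

        mip = nfp_mip_open(cpp);   /* Reads the MIP via the NFFW resource */
        if (mip == NULL)
                return -ENODEV;

        nfp_mip_symtab(mip, &addr, &size);
        PMD_DRV_LOG(DEBUG, "MIP '%s': symtab at %#x, %u bytes",
                        nfp_mip_name(mip), addr, size);

        nfp_mip_close(mip);

        return 0;
}
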
*/ -#include -#include +#include "nfp_mutex.h" + #include -#include "nfp_cpp.h" #include "nfp_logs.h" -#include "nfp6000/nfp6000.h" - -#define MUTEX_LOCKED(interface) ((((uint32_t)(interface)) << 16) | 0x000f) -#define MUTEX_UNLOCK(interface) (0 | 0x0000) - -#define MUTEX_IS_LOCKED(value) (((value) & 0xffff) == 0x000f) -#define MUTEX_IS_UNLOCKED(value) (((value) & 0xffff) == 0x0000) -#define MUTEX_INTERFACE(value) (((value) >> 16) & 0xffff) +#include "nfp_target.h" /* * If you need more than 65536 recursive locks, please @@ -28,30 +20,62 @@ struct nfp_cpp_mutex { struct nfp_cpp *cpp; uint8_t target; uint16_t depth; - unsigned long long address; + uint64_t address; uint32_t key; - unsigned int usage; + uint32_t usage; struct nfp_cpp_mutex *prev, *next; }; +static inline uint32_t +nfp_mutex_locked(uint16_t interface) +{ + return (uint32_t)interface << 16 | 0x000f; +} + +static inline uint32_t +nfp_mutex_unlocked(uint16_t interface) +{ + return (uint32_t)interface << 16 | 0x0000; +} + +static inline uint16_t +nfp_mutex_owner(uint32_t val) +{ + return (val >> 16) & 0xffff; +} + +static inline bool +nfp_mutex_is_locked(uint32_t val) +{ + return (val & 0xffff) == 0x000f; +} + +static inline bool +nfp_mutex_is_unlocked(uint32_t val) +{ + return (val & 0xffff) == 0; +} + static int -_nfp_cpp_mutex_validate(uint32_t model, int *target, unsigned long long address) +nfp_cpp_mutex_validate(uint16_t interface, + int *target, + uint64_t address) { + /* Not permitted on invalid interfaces */ + if (NFP_CPP_INTERFACE_TYPE_of(interface) == NFP_CPP_INTERFACE_TYPE_INVALID) + return -EINVAL; + /* Address must be 64-bit aligned */ - if (address & 7) + if ((address & 7) != 0) return -EINVAL; - if (NFP_CPP_MODEL_IS_6000(model)) { - if (*target != NFP_CPP_TARGET_MU) - return -EINVAL; - } else { + if (*target != NFP_CPP_TARGET_MU) return -EINVAL; - } return 0; } -/* +/** * Initialize a mutex location * * The CPP target:address must point to a 64-bit aligned location, and @@ -63,23 +87,29 @@ _nfp_cpp_mutex_validate(uint32_t model, int *target, unsigned long long address) * This function should only be called when setting up * the initial lock state upon boot-up of the system. * - * @param mutex NFP CPP Mutex handle - * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or - * NFP_CPP_TARGET_MU) - * @param address Offset into the address space of the NFP CPP target ID - * @param key Unique 32-bit value for this mutex + * @param cpp + * NFP CPP handle + * @param target + * NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @param address + * Offset into the address space of the NFP CPP target ID + * @param key + * Unique 32-bit value for this mutex * - * @return 0 on success, or negative value on failure. 
+ * @return + * 0 on success, or negative value on failure */ int -nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, unsigned long long address, - uint32_t key) +nfp_cpp_mutex_init(struct nfp_cpp *cpp, + int target, + uint64_t address, + uint32_t key) { - uint32_t model = nfp_cpp_model(cpp); - uint32_t muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ int err; + uint32_t muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ + uint16_t interface = nfp_cpp_interface(cpp); - err = _nfp_cpp_mutex_validate(model, &target, address); + err = nfp_cpp_mutex_validate(interface, &target, address); if (err < 0) return err; @@ -87,16 +117,14 @@ nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, unsigned long long address, if (err < 0) return err; - err = - nfp_cpp_writel(cpp, muw, address + 0, - MUTEX_LOCKED(nfp_cpp_interface(cpp))); + err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface)); if (err < 0) return err; return 0; } -/* +/** * Create a mutex handle from an address controlled by a MU Atomic engine * * The CPP target:address must point to a 64-bit aligned location, and @@ -105,41 +133,31 @@ nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, unsigned long long address, * Only target/address pairs that point to entities that support the * MU Atomic Engine are supported. * - * @param cpp NFP CPP handle - * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or - * NFP_CPP_TARGET_MU) - * @param address Offset into the address space of the NFP CPP target ID - * @param key 32-bit unique key (must match the key at this location) + * @param cpp + * NFP CPP handle + * @param target + * NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @param address + * Offset into the address space of the NFP CPP target ID + * @param key + * 32-bit unique key (must match the key at this location) * - * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on failure. + * @return + * A non-NULL struct nfp_cpp_mutex * on success, NULL on failure. */ struct nfp_cpp_mutex * -nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, - unsigned long long address, uint32_t key) +nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, + int target, + uint64_t address, + uint32_t key) { - uint32_t model = nfp_cpp_model(cpp); - struct nfp_cpp_mutex *mutex; - uint32_t mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ int err; uint32_t tmp; + struct nfp_cpp_mutex *mutex; + uint32_t mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ + uint16_t interface = nfp_cpp_interface(cpp); - /* Look for cached mutex */ - for (mutex = cpp->mutex_cache; mutex; mutex = mutex->next) { - if (mutex->target == target && mutex->address == address) - break; - } - - if (mutex) { - if (mutex->key == key) { - mutex->usage++; - return mutex; - } - - /* If the key doesn't match... 
*/ - return NULL; - } - - err = _nfp_cpp_mutex_validate(model, &target, address); + err = nfp_cpp_mutex_validate(interface, &target, address); if (err < 0) return NULL; @@ -159,99 +177,30 @@ nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, mutex->address = address; mutex->key = key; mutex->depth = 0; - mutex->usage = 1; - - /* Add mutex to the cache */ - if (cpp->mutex_cache) { - cpp->mutex_cache->prev = mutex; - mutex->next = cpp->mutex_cache; - cpp->mutex_cache = mutex; - } else { - cpp->mutex_cache = mutex; - } return mutex; } -struct nfp_cpp * -nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex) -{ - return mutex->cpp; -} - -uint32_t -nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex) -{ - return mutex->key; -} - -uint16_t -nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex) -{ - uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ - uint32_t value, key; - int err; - - err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); - if (err < 0) - return err; - - err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); - if (err < 0) - return err; - - if (key != mutex->key) - return -EPERM; - - if (MUTEX_IS_LOCKED(value) == 0) - return 0; - - return MUTEX_INTERFACE(value); -} - -int -nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex) -{ - return mutex->target; -} - -uint64_t -nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex) -{ - return mutex->address; -} - -/* +/** * Free a mutex handle - does not alter the lock state * - * @param mutex NFP CPP Mutex handle + * @param mutex + * NFP CPP Mutex handle */ void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex) { - mutex->usage--; - if (mutex->usage > 0) - return; - - /* Remove mutex from the cache */ - if (mutex->next) - mutex->next->prev = mutex->prev; - if (mutex->prev) - mutex->prev->next = mutex->next; - - /* If mutex->cpp == NULL, something broke */ - if (mutex->cpp && mutex == mutex->cpp->mutex_cache) - mutex->cpp->mutex_cache = mutex->next; - free(mutex); } -/* +/** * Lock a mutex handle, using the NFP MU Atomic Engine * - * @param mutex NFP CPP Mutex handle + * @param mutex + * NFP CPP Mutex handle * - * @return 0 on success, or negative value on failure. + * @return + * 0 on success, or negative value on failure. */ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) @@ -263,87 +212,91 @@ nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) /* If err != -EBUSY, then the lock was damaged */ if (err < 0 && err != -EBUSY) return err; + if (time(NULL) >= warn_at) { - PMD_DRV_LOG(ERR, "Warning: waiting for NFP mutex usage:%u depth:%hd] target:%d addr:%llx key:%08x]", - mutex->usage, mutex->depth, mutex->target, - mutex->address, mutex->key); + PMD_DRV_LOG(WARNING, "Waiting for NFP mutex..."); warn_at = time(NULL) + 60; } + sched_yield(); } + return 0; } -/* +/** * Unlock a mutex handle, using the NFP MU Atomic Engine * - * @param mutex NFP CPP Mutex handle + * @param mutex + * NFP CPP Mutex handle * - * @return 0 on success, or negative value on failure. 
+ * @return + * 0 on success, or negative value on failure */ int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex) { - uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ - uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + int err; + uint32_t key; + uint32_t value; struct nfp_cpp *cpp = mutex->cpp; - uint32_t key, value; uint16_t interface = nfp_cpp_interface(cpp); - int err; + uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ if (mutex->depth > 1) { mutex->depth--; return 0; } - err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); - if (err < 0) - goto exit; - err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); if (err < 0) - goto exit; + return err; - if (key != mutex->key) { - err = -EPERM; - goto exit; - } + if (key != mutex->key) + return -EPERM; - if (value != MUTEX_LOCKED(interface)) { - err = -EACCES; - goto exit; - } + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); + if (err < 0) + return err; + + if (value != nfp_mutex_locked(interface)) + return -EACCES; - err = nfp_cpp_writel(cpp, muw, mutex->address, MUTEX_UNLOCK(interface)); + err = nfp_cpp_writel(cpp, muw, mutex->address, + nfp_mutex_unlocked(interface)); if (err < 0) - goto exit; + return err; mutex->depth = 0; -exit: - return err; + return 0; } -/* +/** * Attempt to lock a mutex handle, using the NFP MU Atomic Engine * * Valid lock states: - * * 0x....0000 - Unlocked * 0x....000f - Locked * - * @param mutex NFP CPP Mutex handle - * @return 0 if the lock succeeded, negative value on failure. + * @param mutex + * NFP CPP Mutex handle + * + * @return + * 0 if the lock succeeded, negative value on failure. */ int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) { - uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ - uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ - uint32_t mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */ - uint32_t key, value, tmp; - struct nfp_cpp *cpp = mutex->cpp; int err; + uint32_t key; + uint32_t tmp; + uint32_t value; + struct nfp_cpp *cpp = mutex->cpp; + uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + uint32_t mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */ if (mutex->depth > 0) { if (mutex->depth == MUTEX_DEPTH_MAX) @@ -356,19 +309,17 @@ nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) /* Verify that the lock marker is not damaged */ err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key); if (err < 0) - goto exit; + return err; - if (key != mutex->key) { - err = -EPERM; - goto exit; - } + if (key != mutex->key) + return -EPERM; /* * Compare against the unlocked state, and if true, * write the interface id into the top 16 bits, and * mark as locked. */ - value = MUTEX_LOCKED(nfp_cpp_interface(cpp)); + value = nfp_mutex_locked(nfp_cpp_interface(cpp)); /* * We use test_set_imm here, as it implies a read @@ -385,35 +336,79 @@ nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) */ err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp); if (err < 0) - goto exit; + return err; /* Was it unlocked? */ - if (MUTEX_IS_UNLOCKED(tmp)) { + if (nfp_mutex_is_unlocked(tmp)) { /* * The read value can only be 0x....0000 in the unlocked state. * If there was another contending for this lock, then * the lock state would be 0x....000f * - * Write our owner ID into the lock + * Write our owner ID into the lock. 
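The mutex helpers fit together in an alloc/lock/unlock/free sequence. A minimal sketch, not part of the patch; the address and key are placeholders, the address must be 64-bit aligned, and only NFP_CPP_TARGET_MU passes the validation above:

static int
example_mutex_section(struct nfp_cpp *cpp, uint64_t addr, uint32_t key)
{
        int err;
        struct nfp_cpp_mutex *mutex;

        mutex = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU, addr, key);
        if (mutex == NULL)
                return -EINVAL;

        err = nfp_cpp_mutex_lock(mutex);
        if (err == 0) {
                /* ... critical section ... */
                err = nfp_cpp_mutex_unlock(mutex);
        }

        nfp_cpp_mutex_free(mutex);

        return err;
}
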
* While not strictly necessary, this helps with * debug and bookkeeping. */ err = nfp_cpp_writel(cpp, muw, mutex->address, value); if (err < 0) - goto exit; + return err; mutex->depth = 1; - goto exit; + return 0; } /* Already locked by us? Success! */ if (tmp == value) { mutex->depth = 1; - goto exit; + return 0; } - err = MUTEX_IS_LOCKED(tmp) ? -EBUSY : -EINVAL; + return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL; +} + +/** + * Release lock if held by local system. + * Extreme care is advised, call only when no local lock users can exist. + * + * @param cpp + * NFP CPP handle + * @param target + * NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @param address + * Offset into the address space of the NFP CPP target ID + * + * @return + * - (0) if the lock was OK + * - (1) if locked by us + * - (-errno) on invalid mutex + */ +int +nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, + int target, + uint64_t address) +{ + int err; + uint32_t tmp; + uint16_t interface = nfp_cpp_interface(cpp); + const uint32_t mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ + const uint32_t muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ + + err = nfp_cpp_mutex_validate(interface, &target, address); + if (err != 0) + return err; + + /* Check lock */ + err = nfp_cpp_readl(cpp, mur, address, &tmp); + if (err < 0) + return err; + + if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface) + return 0; + + /* Bust the lock */ + err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface)); + if (err < 0) + return err; -exit: - return err; + return 1; } diff --git a/drivers/net/nfp/nfpcore/nfp_mutex.h b/drivers/net/nfp/nfpcore/nfp_mutex.h new file mode 100644 index 00000000000..a79490b4d67 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_mutex.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Corigine, Inc. + * All rights reserved. + */ + +#ifndef __NFP_MUTEX_H__ +#define __NFP_MUTEX_H__ + +#include "nfp_cpp.h" + +struct nfp_cpp_mutex; + +int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, + uint64_t address, uint32_t key_id); + +struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + uint64_t address, uint32_t key_id); + +void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target, uint64_t address); + +#endif /* __NFP_MUTEX_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp_nffw.c b/drivers/net/nfp/nfpcore/nfp_nffw.c index 07d63900dcd..2f07fcd6c1c 100644 --- a/drivers/net/nfp/nfpcore/nfp_nffw.c +++ b/drivers/net/nfp/nfpcore/nfp_nffw.c @@ -3,11 +3,76 @@ * All rights reserved. */ -#include "nfp_cpp.h" #include "nfp_nffw.h" + +#include "../nfp_logs.h" #include "nfp_mip.h" -#include "nfp6000/nfp6000.h" #include "nfp_resource.h" +#include "nfp6000/nfp6000.h" + +/* + * Init-CSR owner IDs for firmware map to firmware IDs which start at 4. + * Lower IDs are reserved for target and loader IDs. + */ +#define NFFW_FWID_EXT 3 /* For active MEs that we didn't load. */ +#define NFFW_FWID_BASE 4 + +#define NFFW_FWID_ALL 255 + +/* + * NFFW_INFO_VERSION history: + * 0: This was never actually used (before versioning), but it refers to + * the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later + * changed to 200. 
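The reworked nfp_nffw_info_open()/nfp_nffw_info_mip_first()/nfp_nffw_info_close() below are consumed exactly the way nfp_mip_read_resource() above uses them: open takes the nfp.nffw resource lock, mip_first reports where the first loaded firmware's MIP lives, and close drops the lock. A standalone sketch of that sequence, not part of the patch:

static int
example_find_mip(struct nfp_cpp *cpp, uint32_t *cpp_id, uint64_t *offset)
{
        int err;
        struct nfp_nffw_info *nffw;

        nffw = nfp_nffw_info_open(cpp);   /* Takes the nfp.nffw resource lock */
        if (nffw == NULL)
                return -ENODEV;

        err = nfp_nffw_info_mip_first(nffw, cpp_id, offset);

        nfp_nffw_info_close(nffw);   /* Always drop the resource lock */

        return err;
}
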
+ * 1: First versioned struct, with + * FWINFO_CNT = 120 + * MEINFO_CNT = 120 + * 2: FWINFO_CNT = 200 + * MEINFO_CNT = 200 + */ +#define NFFW_INFO_VERSION_CURRENT 2 + +/* Enough for all current chip families */ +#define NFFW_MEINFO_CNT_V1 120 +#define NFFW_FWINFO_CNT_V1 120 +#define NFFW_MEINFO_CNT_V2 200 +#define NFFW_FWINFO_CNT_V2 200 + +/* nfp.nffw meinfo */ +struct nffw_meinfo { + uint32_t ctxmask_fwid_meid; +}; + +struct nffw_fwinfo { + uint32_t loaded_mu_da_mip_off_hi; + uint32_t mip_cppid; /**< 0 means no MIP */ + uint32_t mip_offset_lo; +}; + +struct nfp_nffw_info_v1 { + struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1]; + struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1]; +}; + +struct nfp_nffw_info_v2 { + struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2]; + struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2]; +}; + +struct nfp_nffw_info_data { + uint32_t flags[2]; + union { + struct nfp_nffw_info_v1 v1; + struct nfp_nffw_info_v2 v2; + } info; +}; + +struct nfp_nffw_info { + struct nfp_cpp *cpp; + struct nfp_resource *res; + + struct nfp_nffw_info_data fwinf; +}; /* * flg_info_version = flags[0]<27:16> @@ -30,11 +95,11 @@ nffw_res_flg_init_get(const struct nfp_nffw_info_data *res) return (res->flags[0] >> 0) & 1; } -/* loaded = loaded__mu_da__mip_off_hi<31:31> */ +/* loaded = loaded_mu_da_mip_off_hi<31:31> */ static uint32_t nffw_fwinfo_loaded_get(const struct nffw_fwinfo *fi) { - return (fi->loaded__mu_da__mip_off_hi >> 31) & 1; + return (fi->loaded_mu_da_mip_off_hi >> 31) & 1; } /* mip_cppid = mip_cppid */ @@ -44,48 +109,25 @@ nffw_fwinfo_mip_cppid_get(const struct nffw_fwinfo *fi) return fi->mip_cppid; } -/* loaded = loaded__mu_da__mip_off_hi<8:8> */ +/* loaded = loaded_mu_da_mip_off_hi<8:8> */ static uint32_t nffw_fwinfo_mip_mu_da_get(const struct nffw_fwinfo *fi) { - return (fi->loaded__mu_da__mip_off_hi >> 8) & 1; + return (fi->loaded_mu_da_mip_off_hi >> 8) & 1; } -/* mip_offset = (loaded__mu_da__mip_off_hi<7:0> << 8) | mip_offset_lo */ +/* mip_offset = (loaded_mu_da_mip_off_hi<7:0> << 32) | mip_offset_lo */ static uint64_t nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi) { - uint64_t mip_off_hi = fi->loaded__mu_da__mip_off_hi; + uint64_t mip_off_hi = fi->loaded_mu_da_mip_off_hi; return (mip_off_hi & 0xFF) << 32 | fi->mip_offset_lo; } -#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) -#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE RTE_BIT32(12) -#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0 -#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT RTE_BIT32(12) - -static int -nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp) -{ - unsigned int mode, addr40; - uint32_t xpbaddr, imbcppat; - int err; - - /* Hardcoded XPB IMB Base, island 0 */ - xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4; - err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat); - if (err < 0) - return err; - - mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); - addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE); - - return nfp_cppat_mu_locality_lsb(mode, addr40); -} - -static unsigned int -nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr) +static uint32_t +nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, + struct nffw_fwinfo **arr) { /* * For the this code, version 0 is most likely to be version 1 in this @@ -108,19 +150,22 @@ nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr) } } -/* - * nfp_nffw_info_open() - Acquire the lock on the NFFW table - * @cpp: NFP CPP handle +/** + * Acquire the lock on the NFFW table + * + * @param cpp + * NFP CPP handle * - * 
Return: nffw info pointer, or NULL on failure + * @return + * NFFW info pointer, or NULL on failure */ struct nfp_nffw_info * nfp_nffw_info_open(struct nfp_cpp *cpp) { - struct nfp_nffw_info_data *fwinf; - struct nfp_nffw_info *state; - uint32_t info_ver; int err; + uint32_t info_ver; + struct nfp_nffw_info *state; + struct nfp_nffw_info_data *fwinf; state = malloc(sizeof(*state)); if (state == NULL) @@ -129,8 +174,10 @@ nfp_nffw_info_open(struct nfp_cpp *cpp) memset(state, 0, sizeof(*state)); state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW); - if (state->res == NULL) + if (state->res == NULL) { + PMD_DRV_LOG(ERR, "NFFW - acquire resource failed"); goto err_free; + } fwinf = &state->fwinf; @@ -138,10 +185,12 @@ nfp_nffw_info_open(struct nfp_cpp *cpp) goto err_release; err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res), - nfp_resource_address(state->res), - fwinf, sizeof(*fwinf)); - if (err < (int)sizeof(*fwinf)) + nfp_resource_address(state->res), + fwinf, sizeof(*fwinf)); + if (err < (int)sizeof(*fwinf)) { + PMD_DRV_LOG(ERR, "NFFW - CPP read error %d", err); goto err_release; + } if (nffw_res_flg_init_get(fwinf) == 0) goto err_release; @@ -160,11 +209,11 @@ nfp_nffw_info_open(struct nfp_cpp *cpp) return NULL; } -/* - * nfp_nffw_info_close() - Release the lock on the NFFW table - * @state: NFP FW info state +/** + * Release the lock on the NFFW table * - * Return: void + * @param state + * NFFW info pointer */ void nfp_nffw_info_close(struct nfp_nffw_info *state) @@ -173,40 +222,50 @@ nfp_nffw_info_close(struct nfp_nffw_info *state) free(state); } -/* - * nfp_nffw_info_fwid_first() - Return the first firmware ID in the NFFW - * @state: NFP FW info state +/** + * Return the first firmware ID in the NFFW + * + * @param state + * NFFW info pointer * - * Return: First NFFW firmware info, NULL on failure + * @return: + * First NFFW firmware info, NULL on failure */ static struct nffw_fwinfo * nfp_nffw_info_fwid_first(struct nfp_nffw_info *state) { + uint32_t i; + uint32_t cnt; struct nffw_fwinfo *fwinfo; - unsigned int cnt, i; cnt = nffw_res_fwinfos(&state->fwinf, &fwinfo); if (cnt == 0) return NULL; for (i = 0; i < cnt; i++) - if (nffw_fwinfo_loaded_get(&fwinfo[i])) + if (nffw_fwinfo_loaded_get(&fwinfo[i]) != 0) return &fwinfo[i]; return NULL; } -/* - * nfp_nffw_info_mip_first() - Retrieve the location of the first FW's MIP - * @state: NFP FW info state - * @cpp_id: Pointer to the CPP ID of the MIP - * @off: Pointer to the CPP Address of the MIP +/** + * Retrieve the location of the first FW's MIP + * + * @param state + * NFFW info pointer + * @param cpp_id + * Pointer to the CPP ID of the MIP + * @param off + * Pointer to the CPP Address of the MIP * - * Return: 0, or -ERRNO + * @return + * 0, or -ERRNO */ int -nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id, - uint64_t *off) +nfp_nffw_info_mip_first(struct nfp_nffw_info *state, + uint32_t *cpp_id, + uint64_t *offset) { struct nffw_fwinfo *fwinfo; @@ -215,20 +274,13 @@ nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id, return -EINVAL; *cpp_id = nffw_fwinfo_mip_cppid_get(fwinfo); - *off = nffw_fwinfo_mip_offset_get(fwinfo); - - if (nffw_fwinfo_mip_mu_da_get(fwinfo)) { - int locality_off; - - if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU) - return 0; + *offset = nffw_fwinfo_mip_offset_get(fwinfo); - locality_off = nfp_mip_mu_locality_lsb(state->cpp); - if (locality_off < 0) - return locality_off; + if (nffw_fwinfo_mip_mu_da_get(fwinfo) != 0) { + int locality_off = 
nfp_cpp_mu_locality_lsb(state->cpp); - *off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); - *off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; + *offset &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); + *offset |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; } return 0; diff --git a/drivers/net/nfp/nfpcore/nfp_nffw.h b/drivers/net/nfp/nfpcore/nfp_nffw.h index 46ac8a8d07e..fd46009d377 100644 --- a/drivers/net/nfp/nfpcore/nfp_nffw.h +++ b/drivers/net/nfp/nfpcore/nfp_nffw.h @@ -8,69 +8,11 @@ #include "nfp_cpp.h" -/* Init-CSR owner IDs for firmware map to firmware IDs which start at 4. - * Lower IDs are reserved for target and loader IDs. - */ -#define NFFW_FWID_EXT 3 /* For active MEs that we didn't load. */ -#define NFFW_FWID_BASE 4 - -#define NFFW_FWID_ALL 255 - -/** - * NFFW_INFO_VERSION history: - * 0: This was never actually used (before versioning), but it refers to - * the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later - * changed to 200. - * 1: First versioned struct, with - * FWINFO_CNT = 120 - * MEINFO_CNT = 120 - * 2: FWINFO_CNT = 200 - * MEINFO_CNT = 200 - */ -#define NFFW_INFO_VERSION_CURRENT 2 - -/* Enough for all current chip families */ -#define NFFW_MEINFO_CNT_V1 120 -#define NFFW_FWINFO_CNT_V1 120 -#define NFFW_MEINFO_CNT_V2 200 -#define NFFW_FWINFO_CNT_V2 200 - -struct nffw_meinfo { - uint32_t ctxmask__fwid__meid; -}; - -struct nffw_fwinfo { - uint32_t loaded__mu_da__mip_off_hi; - uint32_t mip_cppid; /* 0 means no MIP */ - uint32_t mip_offset_lo; -}; - -struct nfp_nffw_info_v1 { - struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1]; - struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1]; -}; - -struct nfp_nffw_info_v2 { - struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2]; - struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2]; -}; - -struct nfp_nffw_info_data { - uint32_t flags[2]; - union { - struct nfp_nffw_info_v1 v1; - struct nfp_nffw_info_v2 v2; - } info; -}; - -struct nfp_nffw_info { - struct nfp_cpp *cpp; - struct nfp_resource *res; - - struct nfp_nffw_info_data fwinf; -}; +struct nfp_nffw_info; struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp); void nfp_nffw_info_close(struct nfp_nffw_info *state); +int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id, + uint64_t *offset); -#endif +#endif /* __NFP_NFFW_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.c b/drivers/net/nfp/nfpcore/nfp_nsp.c index 1f6b7bd85cf..5b804f6174b 100644 --- a/drivers/net/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/nfp/nfpcore/nfp_nsp.c @@ -3,26 +3,135 @@ * All rights reserved. 
*/ -#define NFP_SUBSYS "nfp_nsp" - -#include -#include +#include "nfp_nsp.h" #include -#include "nfp_cpp.h" #include "nfp_logs.h" -#include "nfp_nsp.h" +#include "nfp_platform.h" #include "nfp_resource.h" -int +/* Offsets relative to the CSR base */ +#define NSP_STATUS 0x00 +#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48) +#define NSP_STATUS_MAJOR GENMASK_ULL(47, 44) +#define NSP_STATUS_MINOR GENMASK_ULL(43, 32) +#define NSP_STATUS_CODE GENMASK_ULL(31, 16) +#define NSP_STATUS_RESULT GENMASK_ULL(15, 8) +#define NSP_STATUS_BUSY RTE_BIT64(0) + +#define NSP_COMMAND 0x08 +#define NSP_COMMAND_OPTION GENMASK_ULL(63, 32) +#define NSP_COMMAND_CODE GENMASK_ULL(31, 16) +#define NSP_COMMAND_DMA_BUF RTE_BIT64(1) +#define NSP_COMMAND_START RTE_BIT64(0) + +/* CPP address to retrieve the data from */ +#define NSP_BUFFER 0x10 +#define NSP_BUFFER_CPP GENMASK_ULL(63, 40) +#define NSP_BUFFER_ADDRESS GENMASK_ULL(39, 0) + +#define NSP_DFLT_BUFFER 0x18 +#define NSP_DFLT_BUFFER_CPP GENMASK_ULL(63, 40) +#define NSP_DFLT_BUFFER_ADDRESS GENMASK_ULL(39, 0) + +#define NSP_DFLT_BUFFER_CONFIG 0x20 +#define NSP_DFLT_BUFFER_SIZE_4KB GENMASK_ULL(15, 8) +#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0) + +#define NSP_MAGIC 0xab10 +#define NSP_MAJOR 0 +#define NSP_MINOR 8 + +#define NSP_CODE_MAJOR GENMASK_ULL(15, 12) +#define NSP_CODE_MINOR GENMASK_ULL(11, 0) + +#define NFP_FW_LOAD_RET_MAJOR GENMASK_ULL(15, 8) +#define NFP_FW_LOAD_RET_MINOR GENMASK_ULL(23, 16) + +enum nfp_nsp_cmd { + SPCODE_NOOP = 0, /* No operation */ + SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */ + SPCODE_FW_DEFAULT = 2, /* Load default (UNDI) FW */ + SPCODE_PHY_INIT = 3, /* Initialize the PHY */ + SPCODE_MAC_INIT = 4, /* Initialize the MAC */ + SPCODE_PHY_RXADAPT = 5, /* Re-run PHY RX Adaptation */ + SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */ + SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */ + SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */ + SPCODE_NSP_WRITE_FLASH = 11, /* Load and flash image from buffer */ + SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */ + SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ + SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */ + SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. 
*/ + SPCODE_HWINFO_SET = 18, /* Set HWinfo entry */ + SPCODE_FW_LOADED = 19, /* Is application firmware loaded */ + SPCODE_VERSIONS = 21, /* Report FW versions */ + SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */ + SPCODE_READ_MEDIA = 23, /* Get the supported/advertised media for a port */ +}; + +static const struct { + uint32_t code; + const char *msg; +} nsp_errors[] = { + { 6010, "could not map to phy for port" }, + { 6011, "not an allowed rate/lanes for port" }, + { 6012, "not an allowed rate/lanes for port" }, + { 6013, "high/low error, change other port first" }, + { 6014, "config not found in flash" }, +}; + +struct nfp_nsp { + struct nfp_cpp *cpp; + struct nfp_resource *res; + struct { + uint16_t major; + uint16_t minor; + } ver; + + /** Eth table config state */ + bool modified; + uint32_t idx; + void *entries; +}; + +/* NFP command argument structure */ +struct nfp_nsp_command_arg { + uint16_t code; /**< NFP SP Command Code */ + bool dma; /**< @buf points to a host buffer, not NSP buffer */ + bool error_quiet; /**< Don't print command error/warning */ + uint32_t timeout_sec; /**< Timeout value to wait for completion in seconds */ + uint32_t option; /**< NSP Command Argument */ + uint64_t buf; /**< NSP Buffer Address */ + /** Callback for interpreting option if error occurred */ + void (*error_cb)(struct nfp_nsp *state, uint32_t ret_val); +}; + +/* NFP command with buffer argument structure */ +struct nfp_nsp_command_buf_arg { + struct nfp_nsp_command_arg arg; /**< NFP command argument structure */ + const void *in_buf; /**< Buffer with data for input */ + void *out_buf; /**< Buffer for output data */ + uint32_t in_size; /**< Size of @in_buf */ + uint32_t out_size; /**< Size of @out_buf */ +}; + +struct nfp_cpp * +nfp_nsp_cpp(struct nfp_nsp *state) +{ + return state->cpp; +} + +bool nfp_nsp_config_modified(struct nfp_nsp *state) { return state->modified; } void -nfp_nsp_config_set_modified(struct nfp_nsp *state, int modified) +nfp_nsp_config_set_modified(struct nfp_nsp *state, + bool modified) { state->modified = modified; } @@ -33,14 +142,16 @@ nfp_nsp_config_entries(struct nfp_nsp *state) return state->entries; } -unsigned int +uint32_t nfp_nsp_config_idx(struct nfp_nsp *state) { return state->idx; } void -nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx) +nfp_nsp_config_set_state(struct nfp_nsp *state, + void *entries, + uint32_t idx) { state->entries = entries; state->idx = idx; @@ -56,30 +167,33 @@ nfp_nsp_config_clear_state(struct nfp_nsp *state) static void nfp_nsp_print_extended_error(uint32_t ret_val) { - int i; + uint32_t i; if (ret_val == 0) return; - for (i = 0; i < (int)RTE_DIM(nsp_errors); i++) - if (ret_val == (uint32_t)nsp_errors[i].code) + for (i = 0; i < RTE_DIM(nsp_errors); i++) + if (ret_val == nsp_errors[i].code) PMD_DRV_LOG(ERR, "err msg: %s", nsp_errors[i].msg); } static int nfp_nsp_check(struct nfp_nsp *state) { - struct nfp_cpp *cpp = state->cpp; - uint64_t nsp_status, reg; - uint32_t nsp_cpp; int err; + uint64_t reg; + uint32_t nsp_cpp; + uint64_t nsp_status; + struct nfp_cpp *cpp = state->cpp; nsp_cpp = nfp_resource_cpp_id(state->res); nsp_status = nfp_resource_address(state->res) + NSP_STATUS; err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, ®); - if (err < 0) + if (err < 0) { + PMD_DRV_LOG(ERR, "NSP - CPP readq failed %d", err); return err; + } if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) { PMD_DRV_LOG(ERR, "Cannot detect NFP Service Processor"); @@ -91,11 +205,11 @@ nfp_nsp_check(struct nfp_nsp *state) if 
(state->ver.major != NSP_MAJOR || state->ver.minor < NSP_MINOR) { PMD_DRV_LOG(ERR, "Unsupported ABI %hu.%hu", state->ver.major, - state->ver.minor); + state->ver.minor); return -EINVAL; } - if (reg & NSP_STATUS_BUSY) { + if ((reg & NSP_STATUS_BUSY) != 0) { PMD_DRV_LOG(ERR, "Service processor busy!"); return -EBUSY; } @@ -103,20 +217,24 @@ nfp_nsp_check(struct nfp_nsp *state) return 0; } -/* - * nfp_nsp_open() - Prepare for communication and lock the NSP resource. - * @cpp: NFP CPP Handle +/** + * Prepare for communication and lock the NSP resource. + * + * @param cpp + * NFP CPP Handle */ struct nfp_nsp * nfp_nsp_open(struct nfp_cpp *cpp) { - struct nfp_resource *res; - struct nfp_nsp *state; int err; + struct nfp_nsp *state; + struct nfp_resource *res; res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP); - if (res == NULL) + if (res == NULL) { + PMD_DRV_LOG(ERR, "NSP - resource acquire failed"); return NULL; + } state = malloc(sizeof(*state)); if (state == NULL) { @@ -128,7 +246,8 @@ nfp_nsp_open(struct nfp_cpp *cpp) state->res = res; err = nfp_nsp_check(state); - if (err) { + if (err != 0) { + PMD_DRV_LOG(ERR, "NSP - check failed"); nfp_nsp_close(state); return NULL; } @@ -136,9 +255,11 @@ nfp_nsp_open(struct nfp_cpp *cpp) return state; } -/* - * nfp_nsp_close() - Clean up and unlock the NSP resource. - * @state: NFP SP state +/** + * Clean up and unlock the NSP resource. + * + * @param state + * NFP SP state */ void nfp_nsp_close(struct nfp_nsp *state) @@ -160,57 +281,66 @@ nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state) } static int -nfp_nsp_wait_reg(struct nfp_cpp *cpp, uint64_t *reg, uint32_t nsp_cpp, - uint64_t addr, uint64_t mask, uint64_t val) +nfp_nsp_wait_reg(struct nfp_cpp *cpp, + uint64_t *reg, + uint32_t nsp_cpp, + uint64_t addr, + uint64_t mask, + uint64_t val) { - struct timespec wait; - int count; int err; + uint32_t count = 0; + struct timespec wait; wait.tv_sec = 0; - wait.tv_nsec = 25000000; - count = 0; + wait.tv_nsec = 25000000; /* 25ms */ for (;;) { err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg); - if (err < 0) + if (err < 0) { + PMD_DRV_LOG(ERR, "NSP - CPP readq failed"); return err; + } if ((*reg & mask) == val) return 0; nanosleep(&wait, 0); - if (count++ > 1000) + if (count++ > 1000) /* 25ms * 1000 = 25s */ return -ETIMEDOUT; } } -/* - * nfp_nsp_command() - Execute a command on the NFP Service Processor - * @state: NFP SP state - * @code: NFP SP Command Code - * @option: NFP SP Command Argument - * @buff_cpp: NFP SP Buffer CPP Address info - * @buff_addr: NFP SP Buffer Host address - * - * Return: 0 for success with no result +/** + * Execute a command on the NFP Service Processor * - * positive value for NSP completion with a result code + * @param state + * NFP SP state + * @param arg + * NFP command argument structure * - * -EAGAIN if the NSP is not yet present - * -ENODEV if the NSP is not a supported model - * -EBUSY if the NSP is stuck - * -EINTR if interrupted while waiting for completion - * -ETIMEDOUT if the NSP took longer than 30 seconds to complete + * @return + * - 0 for success with no result + * - Positive value for NSP completion with a result code + * - -EAGAIN if the NSP is not yet present + * - -ENODEV if the NSP is not a supported model + * - -EBUSY if the NSP is stuck + * - -EINTR if interrupted while waiting for completion + * - -ETIMEDOUT if the NSP took longer than @timeout_sec seconds to complete */ static int -nfp_nsp_command(struct nfp_nsp *state, uint16_t code, uint32_t option, - uint32_t buff_cpp, uint64_t buff_addr) 
+nfp_nsp_command_real(struct nfp_nsp *state, + const struct nfp_nsp_command_arg *arg) { - uint64_t reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command; - struct nfp_cpp *cpp = state->cpp; - uint32_t nsp_cpp; int err; + uint64_t reg; + uint32_t nsp_cpp; + uint64_t ret_val; + uint64_t nsp_base; + uint64_t nsp_buffer; + uint64_t nsp_status; + uint64_t nsp_command; + struct nfp_cpp *cpp = state->cpp; nsp_cpp = nfp_resource_cpp_id(state->res); nsp_base = nfp_resource_address(state->res); @@ -219,126 +349,128 @@ nfp_nsp_command(struct nfp_nsp *state, uint16_t code, uint32_t option, nsp_buffer = nsp_base + NSP_BUFFER; err = nfp_nsp_check(state); - if (err) + if (err != 0) { + PMD_DRV_LOG(ERR, "Check NSP command failed"); return err; - - if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) || - !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) { - PMD_DRV_LOG(ERR, "Host buffer out of reach %08x %" PRIx64, - buff_cpp, buff_addr); - return -EINVAL; } - err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer, - FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) | - FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr)); + err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer, arg->buf); if (err < 0) return err; err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command, - FIELD_PREP(NSP_COMMAND_OPTION, option) | - FIELD_PREP(NSP_COMMAND_CODE, code) | - FIELD_PREP(NSP_COMMAND_START, 1)); + FIELD_PREP(NSP_COMMAND_OPTION, arg->option) | + FIELD_PREP(NSP_COMMAND_CODE, arg->code) | + FIELD_PREP(NSP_COMMAND_DMA_BUF, arg->dma) | + FIELD_PREP(NSP_COMMAND_START, 1)); if (err < 0) return err; /* Wait for NSP_COMMAND_START to go to 0 */ err = nfp_nsp_wait_reg(cpp, ®, nsp_cpp, nsp_command, - NSP_COMMAND_START, 0); - if (err) { - PMD_DRV_LOG(ERR, "Error %d waiting for code 0x%04x to start", - err, code); + NSP_COMMAND_START, 0); + if (err != 0) { + PMD_DRV_LOG(ERR, "Error %d waiting for code %#04x to start", + err, arg->code); return err; } /* Wait for NSP_STATUS_BUSY to go to 0 */ - err = nfp_nsp_wait_reg(cpp, ®, nsp_cpp, nsp_status, NSP_STATUS_BUSY, - 0); - if (err) { - PMD_DRV_LOG(ERR, "Error %d waiting for code 0x%04x to start", - err, code); + err = nfp_nsp_wait_reg(cpp, ®, nsp_cpp, nsp_status, + NSP_STATUS_BUSY, 0); + if (err != 0) { + PMD_DRV_LOG(ERR, "Error %d waiting for code %#04x to complete", + err, arg->code); return err; } err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &ret_val); if (err < 0) return err; + ret_val = FIELD_GET(NSP_COMMAND_OPTION, ret_val); err = FIELD_GET(NSP_STATUS_RESULT, reg); - if (err) { - PMD_DRV_LOG(ERR, "Result (error) code set: %d (%d) command: %d", - -err, (int)ret_val, code); - nfp_nsp_print_extended_error(ret_val); + if (err != 0) { + if (!arg->error_quiet) + PMD_DRV_LOG(WARNING, "Result (error) code set: %d (%d) command: %d", + -err, (int)ret_val, arg->code); + + if (arg->error_cb != 0) + arg->error_cb(state, ret_val); + else + nfp_nsp_print_extended_error(ret_val); + return -err; } return ret_val; } -#define SZ_1M 0x00100000 - static int -nfp_nsp_command_buf(struct nfp_nsp *nsp, uint16_t code, uint32_t option, - const void *in_buf, unsigned int in_size, void *out_buf, - unsigned int out_size) +nfp_nsp_command(struct nfp_nsp *state, + uint16_t code) { - struct nfp_cpp *cpp = nsp->cpp; - unsigned int max_size; - uint64_t reg, cpp_buf; - int ret, err; - uint32_t cpp_id; + const struct nfp_nsp_command_arg arg = { + .code = code, + }; - if (nsp->ver.minor < 13) { - PMD_DRV_LOG(ERR, "NSP: Code 0x%04x with buffer not supported ABI %hu.%hu)", - code, nsp->ver.major, nsp->ver.minor); - return -EOPNOTSUPP; - } - - err = 
nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), - nfp_resource_address(nsp->res) + - NSP_DFLT_BUFFER_CONFIG, - ®); - if (err < 0) - return err; + return nfp_nsp_command_real(state, &arg); +} - max_size = RTE_MAX(in_size, out_size); - if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) { - PMD_DRV_LOG(ERR, "NSP: default buffer too small for command 0x%04x (%llu < %u)", - code, FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M, max_size); - return -EINVAL; - } +static int +nfp_nsp_command_buf_def(struct nfp_nsp *nsp, + struct nfp_nsp_command_buf_arg *arg) +{ + int err; + int ret; + uint64_t reg; + uint32_t cpp_id; + uint64_t cpp_buf; + struct nfp_cpp *cpp = nsp->cpp; err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), - nfp_resource_address(nsp->res) + - NSP_DFLT_BUFFER, - ®); + nfp_resource_address(nsp->res) + NSP_DFLT_BUFFER, + ®); if (err < 0) return err; - cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8; - cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg); + cpp_id = FIELD_GET(NSP_DFLT_BUFFER_CPP, reg) << 8; + cpp_buf = FIELD_GET(NSP_DFLT_BUFFER_ADDRESS, reg); - if (in_buf && in_size) { - err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size); + if (arg->in_buf != NULL && arg->in_size > 0) { + err = nfp_cpp_write(cpp, cpp_id, cpp_buf, + arg->in_buf, arg->in_size); if (err < 0) return err; } + /* Zero out remaining part of the buffer */ - if (out_buf && out_size && out_size > in_size) { - memset(out_buf, 0, out_size - in_size); - err = nfp_cpp_write(cpp, cpp_id, cpp_buf + in_size, out_buf, - out_size - in_size); + if (arg->out_buf != NULL && arg->out_size > arg->in_size) { + err = nfp_cpp_write(cpp, cpp_id, cpp_buf + arg->in_size, + arg->out_buf, arg->out_size - arg->in_size); if (err < 0) return err; } - ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf); - if (ret < 0) + if (!FIELD_FIT(NSP_BUFFER_CPP, cpp_id >> 8) || + !FIELD_FIT(NSP_BUFFER_ADDRESS, cpp_buf)) { + PMD_DRV_LOG(ERR, "Buffer out of reach %#08x %#016lx", + cpp_id, cpp_buf); + return -EINVAL; + } + + arg->arg.buf = FIELD_PREP(NSP_BUFFER_CPP, cpp_id >> 8) | + FIELD_PREP(NSP_BUFFER_ADDRESS, cpp_buf); + ret = nfp_nsp_command_real(nsp, &arg->arg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "NSP command failed"); return ret; + } - if (out_buf && out_size) { - err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size); + if (arg->out_buf != NULL && arg->out_size > 0) { + err = nfp_cpp_read(cpp, cpp_id, cpp_buf, + arg->out_buf, arg->out_size); if (err < 0) return err; } @@ -346,30 +478,67 @@ nfp_nsp_command_buf(struct nfp_nsp *nsp, uint16_t code, uint32_t option, return ret; } +#define SZ_1M 0x00100000 +#define SZ_4K 0x00001000 + +static int +nfp_nsp_command_buf(struct nfp_nsp *nsp, + struct nfp_nsp_command_buf_arg *arg) +{ + int err; + size_t size; + uint64_t reg; + size_t max_size; + struct nfp_cpp *cpp = nsp->cpp; + + if (nsp->ver.minor < 13) { + PMD_DRV_LOG(ERR, "NSP: Code %#04x with buffer not supported ABI %hu.%hu)", + arg->arg.code, nsp->ver.major, nsp->ver.minor); + return -EOPNOTSUPP; + } + + err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), + nfp_resource_address(nsp->res) + NSP_DFLT_BUFFER_CONFIG, + ®); + if (err < 0) + return err; + + max_size = RTE_MAX(arg->in_size, arg->out_size); + size = FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M + + FIELD_GET(NSP_DFLT_BUFFER_SIZE_4KB, reg) * SZ_4K; + if (size < max_size) { + PMD_DRV_LOG(ERR, "NSP: default buffer too small for command %#04x (%zu < %zu)", + arg->arg.code, size, max_size); + return -EINVAL; + } + + return nfp_nsp_command_buf_def(nsp, arg); +} + 
int nfp_nsp_wait(struct nfp_nsp *state) { - struct timespec wait; - int count; int err; + int count = 0; + struct timespec wait; wait.tv_sec = 0; - wait.tv_nsec = 25000000; - count = 0; + wait.tv_nsec = 25000000; /* 25ms */ for (;;) { - err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0); + err = nfp_nsp_command(state, SPCODE_NOOP); if (err != -EAGAIN) break; nanosleep(&wait, 0); - if (count++ > 1000) { + if (count++ > 1000) { /* 25ms * 1000 = 25s */ err = -ETIMEDOUT; break; } } - if (err) + + if (err != 0) PMD_DRV_LOG(ERR, "NSP failed to respond %d", err); return err; @@ -378,48 +547,149 @@ nfp_nsp_wait(struct nfp_nsp *state) int nfp_nsp_device_soft_reset(struct nfp_nsp *state) { - return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0); + return nfp_nsp_command(state, SPCODE_SOFT_RESET); } int nfp_nsp_mac_reinit(struct nfp_nsp *state) { - return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0); + return nfp_nsp_command(state, SPCODE_MAC_INIT); +} + +static void +nfp_nsp_load_fw_extended_msg(struct nfp_nsp *state, + uint32_t ret_val) +{ + uint32_t minor; + uint32_t major; + static const char * const major_msg[] = { + /* 0 */ "Firmware from driver loaded", + /* 1 */ "Firmware from flash loaded", + /* 2 */ "Firmware loading failure", + }; + static const char * const minor_msg[] = { + /* 0 */ "", + /* 1 */ "no named partition on flash", + /* 2 */ "error reading from flash", + /* 3 */ "can not deflate", + /* 4 */ "not a trusted file", + /* 5 */ "can not parse FW file", + /* 6 */ "MIP not found in FW file", + /* 7 */ "null firmware name in MIP", + /* 8 */ "FW version none", + /* 9 */ "FW build number none", + /* 10 */ "no FW selection policy HWInfo key found", + /* 11 */ "static FW selection policy", + /* 12 */ "FW version has precedence", + /* 13 */ "different FW application load requested", + /* 14 */ "development build", + }; + + major = FIELD_GET(NFP_FW_LOAD_RET_MAJOR, ret_val); + minor = FIELD_GET(NFP_FW_LOAD_RET_MINOR, ret_val); + + if (!nfp_nsp_has_stored_fw_load(state)) + return; + + if (major >= RTE_DIM(major_msg)) + PMD_DRV_LOG(INFO, "FW loading status: %x", ret_val); + else if (minor >= RTE_DIM(minor_msg)) + PMD_DRV_LOG(INFO, "%s, reason code: %d", major_msg[major], minor); + else + PMD_DRV_LOG(INFO, "%s%c %s", major_msg[major], + minor != 0 ? 
',' : '.', minor_msg[minor]); } int -nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size) +nfp_nsp_load_fw(struct nfp_nsp *state, + void *buf, + size_t size) { - return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, size, buf, size, - NULL, 0); + int ret; + struct nfp_nsp_command_buf_arg load_fw = { + { + .code = SPCODE_FW_LOAD, + .option = size, + .error_cb = nfp_nsp_load_fw_extended_msg, + }, + .in_buf = buf, + .in_size = size, + }; + + ret = nfp_nsp_command_buf(state, &load_fw); + if (ret < 0) + return ret; + + nfp_nsp_load_fw_extended_msg(state, ret); + + return 0; } int -nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size) +nfp_nsp_read_eth_table(struct nfp_nsp *state, + void *buf, + size_t size) { - return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0, - buf, size); + struct nfp_nsp_command_buf_arg eth_rescan = { + { + .code = SPCODE_ETH_RESCAN, + .option = size, + }, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, ð_rescan); } int -nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf, - unsigned int size) +nfp_nsp_write_eth_table(struct nfp_nsp *state, + const void *buf, + size_t size) { - return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size, - NULL, 0); + struct nfp_nsp_command_buf_arg eth_ctrl = { + { + .code = SPCODE_ETH_CONTROL, + .option = size, + }, + .in_buf = buf, + .in_size = size, + }; + + return nfp_nsp_command_buf(state, ð_ctrl); } int -nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size) +nfp_nsp_read_identify(struct nfp_nsp *state, + void *buf, + size_t size) { - return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0, - buf, size); + struct nfp_nsp_command_buf_arg identify = { + { + .code = SPCODE_NSP_IDENTIFY, + .option = size, + }, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &identify); } int -nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, void *buf, - unsigned int size) +nfp_nsp_read_sensors(struct nfp_nsp *state, + uint32_t sensor_mask, + void *buf, + size_t size) { - return nfp_nsp_command_buf(state, SPCODE_NSP_SENSORS, sensor_mask, NULL, - 0, buf, size); + struct nfp_nsp_command_buf_arg sensors = { + { + .code = SPCODE_NSP_SENSORS, + .option = sensor_mask, + }, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &sensors); } diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.h b/drivers/net/nfp/nfpcore/nfp_nsp.h index 9905b2d3d35..fe52dffeb7f 100644 --- a/drivers/net/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/nfp/nfpcore/nfp_nsp.h @@ -3,106 +3,12 @@ * All rights reserved. 
*/ -#ifndef NSP_NSP_H -#define NSP_NSP_H 1 +#ifndef __NSP_NSP_H__ +#define __NSP_NSP_H__ #include "nfp_cpp.h" -#include "nfp_nsp.h" - -#define GENMASK_ULL(h, l) \ - (((~0ULL) - (1ULL << (l)) + 1) & \ - (~0ULL >> (64 - 1 - (h)))) - -#define __bf_shf(x) (__builtin_ffsll(x) - 1) - -#define FIELD_GET(_mask, _reg) \ - (__extension__ ({ \ - typeof(_mask) _x = (_mask); \ - (typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \ - })) - -#define FIELD_FIT(_mask, _val) \ - (__extension__ ({ \ - typeof(_mask) _x = (_mask); \ - !((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \ - })) - -#define FIELD_PREP(_mask, _val) \ - (__extension__ ({ \ - typeof(_mask) _x = (_mask); \ - ((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \ - })) - -/* Offsets relative to the CSR base */ -#define NSP_STATUS 0x00 -#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48) -#define NSP_STATUS_MAJOR GENMASK_ULL(47, 44) -#define NSP_STATUS_MINOR GENMASK_ULL(43, 32) -#define NSP_STATUS_CODE GENMASK_ULL(31, 16) -#define NSP_STATUS_RESULT GENMASK_ULL(15, 8) -#define NSP_STATUS_BUSY RTE_BIT64(0) - -#define NSP_COMMAND 0x08 -#define NSP_COMMAND_OPTION GENMASK_ULL(63, 32) -#define NSP_COMMAND_CODE GENMASK_ULL(31, 16) -#define NSP_COMMAND_START RTE_BIT64(0) - -/* CPP address to retrieve the data from */ -#define NSP_BUFFER 0x10 -#define NSP_BUFFER_CPP GENMASK_ULL(63, 40) -#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38) -#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0) - -#define NSP_DFLT_BUFFER 0x18 - -#define NSP_DFLT_BUFFER_CONFIG 0x20 -#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0) - -#define NSP_MAGIC 0xab10 -#define NSP_MAJOR 0 -#define NSP_MINOR 8 - -#define NSP_CODE_MAJOR GENMASK(15, 12) -#define NSP_CODE_MINOR GENMASK(11, 0) - -enum nfp_nsp_cmd { - SPCODE_NOOP = 0, /* No operation */ - SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */ - SPCODE_FW_DEFAULT = 2, /* Load default (UNDI) FW */ - SPCODE_PHY_INIT = 3, /* Initialize the PHY */ - SPCODE_MAC_INIT = 4, /* Initialize the MAC */ - SPCODE_PHY_RXADAPT = 5, /* Re-run PHY RX Adaptation */ - SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */ - SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */ - SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */ - SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */ - SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ -}; - -static const struct { - int code; - const char *msg; -} nsp_errors[] = { - { 6010, "could not map to phy for port" }, - { 6011, "not an allowed rate/lanes for port" }, - { 6012, "not an allowed rate/lanes for port" }, - { 6013, "high/low error, change other port first" }, - { 6014, "config not found in flash" }, -}; -struct nfp_nsp { - struct nfp_cpp *cpp; - struct nfp_resource *res; - struct { - uint16_t major; - uint16_t minor; - } ver; - - /* Eth table config state */ - int modified; - unsigned int idx; - void *entries; -}; +struct nfp_nsp; struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp); void nfp_nsp_close(struct nfp_nsp *state); @@ -110,25 +16,69 @@ uint16_t nfp_nsp_get_abi_ver_major(struct nfp_nsp *state); uint16_t nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); int nfp_nsp_wait(struct nfp_nsp *state); int nfp_nsp_device_soft_reset(struct nfp_nsp *state); -int nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, size_t size); int nfp_nsp_mac_reinit(struct nfp_nsp *state); -int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size); -int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, - void *buf, 
unsigned int size); +int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, size_t size); +int nfp_nsp_read_sensors(struct nfp_nsp *state, uint32_t sensor_mask, + void *buf, size_t size); -static inline int nfp_nsp_has_mac_reinit(struct nfp_nsp *state) +static inline bool +nfp_nsp_has_mac_reinit(struct nfp_nsp *state) { return nfp_nsp_get_abi_ver_minor(state) > 20; } +static inline bool +nfp_nsp_has_stored_fw_load(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 23; +} + +static inline bool +nfp_nsp_has_hwinfo_lookup(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 24; +} + +static inline bool +nfp_nsp_has_hwinfo_set(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 25; +} + +static inline bool +nfp_nsp_has_fw_loaded(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 25; +} + +static inline bool +nfp_nsp_has_versions(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 27; +} + +static inline bool +nfp_nsp_has_read_module_eeprom(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 28; +} + +static inline bool +nfp_nsp_has_read_media(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 33; +} + enum nfp_eth_interface { - NFP_INTERFACE_NONE = 0, - NFP_INTERFACE_SFP = 1, - NFP_INTERFACE_SFPP = 10, - NFP_INTERFACE_SFP28 = 28, - NFP_INTERFACE_QSFP = 40, - NFP_INTERFACE_CXP = 100, - NFP_INTERFACE_QSFP28 = 112, + NFP_INTERFACE_NONE = 0, + NFP_INTERFACE_SFP = 1, + NFP_INTERFACE_SFPP = 10, + NFP_INTERFACE_SFP28 = 28, + NFP_INTERFACE_QSFP = 40, + NFP_INTERFACE_RJ45 = 45, + NFP_INTERFACE_CXP = 100, + NFP_INTERFACE_QSFP28 = 112, }; enum nfp_eth_media { @@ -152,142 +102,106 @@ enum nfp_eth_fec { NFP_FEC_DISABLED_BIT, }; -#define NFP_FEC_AUTO RTE_BIT32(NFP_FEC_AUTO_BIT) -#define NFP_FEC_BASER RTE_BIT32(NFP_FEC_BASER_BIT) -#define NFP_FEC_REED_SOLOMON RTE_BIT32(NFP_FEC_REED_SOLOMON_BIT) -#define NFP_FEC_DISABLED RTE_BIT32(NFP_FEC_DISABLED_BIT) - -/** - * struct nfp_eth_table - ETH table information - * @count: number of table entries - * @max_index: max of @index fields of all @ports - * @ports: table of ports - * - * @eth_index: port index according to legacy ethX numbering - * @index: chip-wide first channel index - * @nbi: NBI index - * @base: first channel index (within NBI) - * @lanes: number of channels - * @speed: interface speed (in Mbps) - * @interface: interface (module) plugged in - * @media: media type of the @interface - * @fec: forward error correction mode - * @aneg: auto negotiation mode - * @mac_addr: interface MAC address - * @label_port: port id - * @label_subport: id of interface within port (for split ports) - * @enabled: is enabled? - * @tx_enabled: is TX enabled? - * @rx_enabled: is RX enabled? - * @override_changed: is media reconfig pending? 
- * - * @port_type: one of %PORT_* defines for ethtool - * @port_lanes: total number of lanes on the port (sum of lanes of all subports) - * @is_split: is interface part of a split port - * @fec_modes_supported: bitmap of FEC modes supported - */ +#define NFP_FEC_AUTO RTE_BIT32(NFP_FEC_AUTO_BIT) +#define NFP_FEC_BASER RTE_BIT32(NFP_FEC_BASER_BIT) +#define NFP_FEC_REED_SOLOMON RTE_BIT32(NFP_FEC_REED_SOLOMON_BIT) +#define NFP_FEC_DISABLED RTE_BIT32(NFP_FEC_DISABLED_BIT) + +/* ETH table information */ struct nfp_eth_table { - unsigned int count; - unsigned int max_index; + uint32_t count; /**< Number of table entries */ + uint32_t max_index; /**< Max of @index fields of all @ports */ struct nfp_eth_table_port { - unsigned int eth_index; - unsigned int index; - unsigned int nbi; - unsigned int base; - unsigned int lanes; - unsigned int speed; + /** Port index according to legacy ethX numbering */ + uint32_t eth_index; + uint32_t index; /**< Chip-wide first channel index */ + uint32_t nbi; /**< NBI index */ + uint32_t base; /**< First channel index (within NBI) */ + uint32_t lanes; /**< Number of channels */ + uint32_t speed; /**< Interface speed (in Mbps) */ - unsigned int interface; - enum nfp_eth_media media; + uint32_t interface; /**< Interface (module) plugged in */ + enum nfp_eth_media media; /**< Media type of the @interface */ - enum nfp_eth_fec fec; - enum nfp_eth_aneg aneg; + enum nfp_eth_fec fec; /**< Forward Error Correction mode */ + enum nfp_eth_fec act_fec; /**< Active Forward Error Correction mode */ + enum nfp_eth_aneg aneg; /**< Auto negotiation mode */ - struct rte_ether_addr mac_addr; + struct rte_ether_addr mac_addr; /**< Interface MAC address */ - uint8_t label_port; + uint8_t label_port; /**< Port id */ + /** Id of interface within port (for split ports) */ uint8_t label_subport; - int enabled; - int tx_enabled; - int rx_enabled; + bool enabled; /**< Enable port */ + bool tx_enabled; /**< Enable TX */ + bool rx_enabled; /**< Enable RX */ + bool supp_aneg; /**< Support auto negotiation */ - int override_changed; + bool override_changed; /**< Media reconfig pending */ - /* Computed fields */ - uint8_t port_type; + uint8_t port_type; /**< One of %PORT_* */ + /** Sum of lanes of all subports of this port */ + uint32_t port_lanes; - unsigned int port_lanes; + bool is_split; /**< Split port */ - int is_split; - - unsigned int fec_modes_supported; - } ports[]; + uint32_t fec_modes_supported; /**< Bitmap of FEC modes supported */ + } ports[]; /**< Table of ports */ }; struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp); -int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable); -int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, - int configed); -int -nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode); +int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, uint32_t idx, bool enable); +int nfp_eth_set_configured(struct nfp_cpp *cpp, uint32_t idx, bool configured); +int nfp_eth_set_fec(struct nfp_cpp *cpp, uint32_t idx, enum nfp_eth_fec mode); -int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, size_t size); int nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf, - unsigned int size); + size_t size); void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, - unsigned int idx); + uint32_t idx); void nfp_nsp_config_clear_state(struct nfp_nsp *state); -void nfp_nsp_config_set_modified(struct nfp_nsp 
*state, int modified); +void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified); void *nfp_nsp_config_entries(struct nfp_nsp *state); -int nfp_nsp_config_modified(struct nfp_nsp *state); -unsigned int nfp_nsp_config_idx(struct nfp_nsp *state); +struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state); +bool nfp_nsp_config_modified(struct nfp_nsp *state); +uint32_t nfp_nsp_config_idx(struct nfp_nsp *state); -static inline int nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port) +static inline bool +nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port) { - return !!eth_port->fec_modes_supported; + return eth_port->fec_modes_supported != 0; } -static inline unsigned int +static inline uint32_t nfp_eth_supported_fec_modes(struct nfp_eth_table_port *eth_port) { return eth_port->fec_modes_supported; } -struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx); +struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, uint32_t idx); int nfp_eth_config_commit_end(struct nfp_nsp *nsp); void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp); -int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode); -int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed); -int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes); - -/** - * struct nfp_nsp_identify - NSP static information - * @version: opaque version string - * @flags: version flags - * @br_primary: branch id of primary bootloader - * @br_secondary: branch id of secondary bootloader - * @br_nsp: branch id of NSP - * @primary: version of primary bootloader - * @secondary: version id of secondary bootloader - * @nsp: version id of NSP - * @sensor_mask: mask of present sensors available on NIC - */ +int nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode); +int nfp_eth_set_speed(struct nfp_nsp *nsp, uint32_t speed); +int nfp_eth_set_split(struct nfp_nsp *nsp, uint32_t lanes); + +/* NSP static information */ struct nfp_nsp_identify { - char version[40]; - uint8_t flags; - uint8_t br_primary; - uint8_t br_secondary; - uint8_t br_nsp; - uint16_t primary; - uint16_t secondary; - uint16_t nsp; - uint64_t sensor_mask; + char version[40]; /**< Opaque version string */ + uint8_t flags; /**< Version flags */ + uint8_t br_primary; /**< Branch id of primary bootloader */ + uint8_t br_secondary; /**< Branch id of secondary bootloader */ + uint8_t br_nsp; /**< Branch id of NSP */ + uint16_t primary; /**< Version of primary bootloader */ + uint16_t secondary; /**< Version id of secondary bootloader */ + uint16_t nsp; /**< Version id of NSP */ + uint64_t sensor_mask; /**< Mask of present sensors available on NIC */ }; -struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp); +struct nfp_nsp_identify *nfp_nsp_identify(struct nfp_nsp *nsp); enum nfp_nsp_sensor_id { NFP_SENSOR_CHIP_TEMPERATURE, @@ -297,6 +211,6 @@ enum nfp_nsp_sensor_id { }; int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id, - long *val); + uint32_t *val); -#endif +#endif /* __NSP_NSP_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c index 21b338461e4..46fa5467de9 100644 --- a/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c +++ b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c @@ -3,12 +3,8 @@ * All rights reserved. 
*/ -#include -#include -#include "nfp_cpp.h" #include "nfp_logs.h" #include "nfp_nsp.h" -#include "nfp_nffw.h" struct nsp_identify { uint8_t version[40]; @@ -24,11 +20,11 @@ struct nsp_identify { }; struct nfp_nsp_identify * -__nfp_nsp_identify(struct nfp_nsp *nsp) +nfp_nsp_identify(struct nfp_nsp *nsp) { - struct nfp_nsp_identify *nspi = NULL; - struct nsp_identify *ni; int ret; + struct nsp_identify *ni; + struct nfp_nsp_identify *nspi = NULL; if (nfp_nsp_get_abi_ver_minor(nsp) < 15) return NULL; @@ -73,11 +69,13 @@ struct nfp_sensors { }; int -nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id, long *val) +nfp_hwmon_read_sensor(struct nfp_cpp *cpp, + enum nfp_nsp_sensor_id id, + uint32_t *val) { - struct nfp_sensors s; - struct nfp_nsp *nsp; int ret; + struct nfp_nsp *nsp; + struct nfp_sensors s; nsp = nfp_nsp_open(cpp); if (nsp == NULL) @@ -105,5 +103,6 @@ nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id, long *val) default: return -EINVAL; } + return 0; } diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c index 01b46522802..cc472907ca2 100644 --- a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c +++ b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c @@ -3,103 +3,61 @@ * All rights reserved. */ -#include -#include -#include -#include "nfp_cpp.h" #include "nfp_logs.h" #include "nfp_nsp.h" -#include "nfp6000/nfp6000.h" - -#define GENMASK_ULL(h, l) \ - (((~0ULL) - (1ULL << (l)) + 1) & \ - (~0ULL >> (64 - 1 - (h)))) - -#define __bf_shf(x) (__builtin_ffsll(x) - 1) - -#define FIELD_GET(_mask, _reg) \ - (__extension__ ({ \ - typeof(_mask) _x = (_mask); \ - (typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \ - })) - -#define FIELD_FIT(_mask, _val) \ - (__extension__ ({ \ - typeof(_mask) _x = (_mask); \ - !((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \ - })) - -#define FIELD_PREP(_mask, _val) \ - (__extension__ ({ \ - typeof(_mask) _x = (_mask); \ - ((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \ - })) - -#define NSP_ETH_NBI_PORT_COUNT 24 -#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT) -#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \ - sizeof(union eth_table_entry)) - -#define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0) -#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8) -#define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48) -#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54) -#define NSP_ETH_PORT_FEC_SUPP_BASER RTE_BIT64(60) -#define NSP_ETH_PORT_FEC_SUPP_RS RTE_BIT64(61) - -#define NSP_ETH_PORT_LANES_MASK rte_cpu_to_le_64(NSP_ETH_PORT_LANES) - -#define NSP_ETH_STATE_CONFIGURED RTE_BIT64(0) -#define NSP_ETH_STATE_ENABLED RTE_BIT64(1) -#define NSP_ETH_STATE_TX_ENABLED RTE_BIT64(2) -#define NSP_ETH_STATE_RX_ENABLED RTE_BIT64(3) -#define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8) -#define NSP_ETH_STATE_INTERFACE GENMASK_ULL(19, 12) -#define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20) -#define NSP_ETH_STATE_OVRD_CHNG RTE_BIT64(22) -#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23) -#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26) - -#define NSP_ETH_CTRL_CONFIGURED RTE_BIT64(0) -#define NSP_ETH_CTRL_ENABLED RTE_BIT64(1) -#define NSP_ETH_CTRL_TX_ENABLED RTE_BIT64(2) -#define NSP_ETH_CTRL_RX_ENABLED RTE_BIT64(3) -#define NSP_ETH_CTRL_SET_RATE RTE_BIT64(4) -#define NSP_ETH_CTRL_SET_LANES RTE_BIT64(5) -#define NSP_ETH_CTRL_SET_ANEG RTE_BIT64(6) -#define NSP_ETH_CTRL_SET_FEC RTE_BIT64(7) +#include "nfp_platform.h" + +#define NSP_ETH_NBI_PORT_COUNT 24 +#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT) +#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * sizeof(union 
eth_table_entry)) + +#define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0) +#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8) +#define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48) +#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54) +#define NSP_ETH_PORT_FEC_SUPP_BASER RTE_BIT64(60) +#define NSP_ETH_PORT_FEC_SUPP_RS RTE_BIT64(61) +#define NSP_ETH_PORT_SUPP_ANEG RTE_BIT64(63) + +#define NSP_ETH_PORT_LANES_MASK rte_cpu_to_le_64(NSP_ETH_PORT_LANES) + +#define NSP_ETH_STATE_CONFIGURED RTE_BIT64(0) +#define NSP_ETH_STATE_ENABLED RTE_BIT64(1) +#define NSP_ETH_STATE_TX_ENABLED RTE_BIT64(2) +#define NSP_ETH_STATE_RX_ENABLED RTE_BIT64(3) +#define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8) +#define NSP_ETH_STATE_INTERFACE GENMASK_ULL(19, 12) +#define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20) +#define NSP_ETH_STATE_OVRD_CHNG RTE_BIT64(22) +#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23) +#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26) +#define NSP_ETH_STATE_ACT_FEC GENMASK_ULL(29, 28) + +#define NSP_ETH_CTRL_CONFIGURED RTE_BIT64(0) +#define NSP_ETH_CTRL_ENABLED RTE_BIT64(1) +#define NSP_ETH_CTRL_TX_ENABLED RTE_BIT64(2) +#define NSP_ETH_CTRL_RX_ENABLED RTE_BIT64(3) +#define NSP_ETH_CTRL_SET_RATE RTE_BIT64(4) +#define NSP_ETH_CTRL_SET_LANES RTE_BIT64(5) +#define NSP_ETH_CTRL_SET_ANEG RTE_BIT64(6) +#define NSP_ETH_CTRL_SET_FEC RTE_BIT64(7) /* Which connector port. */ -#define PORT_TP 0x00 -#define PORT_AUI 0x01 -#define PORT_MII 0x02 -#define PORT_FIBRE 0x03 -#define PORT_BNC 0x04 -#define PORT_DA 0x05 -#define PORT_NONE 0xef -#define PORT_OTHER 0xff - -#define SPEED_10 10 -#define SPEED_100 100 -#define SPEED_1000 1000 -#define SPEED_2500 2500 -#define SPEED_5000 5000 -#define SPEED_10000 10000 -#define SPEED_14000 14000 -#define SPEED_20000 20000 -#define SPEED_25000 25000 -#define SPEED_40000 40000 -#define SPEED_50000 50000 -#define SPEED_56000 56000 -#define SPEED_100000 100000 +#define PORT_TP 0x00 +#define PORT_AUI 0x01 +#define PORT_MII 0x02 +#define PORT_FIBRE 0x03 +#define PORT_BNC 0x04 +#define PORT_DA 0x05 +#define PORT_NONE 0xef +#define PORT_OTHER 0xff enum nfp_eth_raw { NSP_ETH_RAW_PORT = 0, NSP_ETH_RAW_STATE, NSP_ETH_RAW_MAC, NSP_ETH_RAW_CONTROL, - - NSP_ETH_NUM_RAW + NSP_ETH_NUM_RAW, }; enum nfp_eth_rate { @@ -124,34 +82,34 @@ union eth_table_entry { static const struct { enum nfp_eth_rate rate; - unsigned int speed; + uint32_t speed; } nsp_eth_rate_tbl[] = { - { RATE_INVALID, 0, }, - { RATE_10M, SPEED_10, }, - { RATE_100M, SPEED_100, }, - { RATE_1G, SPEED_1000, }, - { RATE_10G, SPEED_10000, }, - { RATE_25G, SPEED_25000, }, + { RATE_INVALID, RTE_ETH_SPEED_NUM_NONE, }, + { RATE_10M, RTE_ETH_SPEED_NUM_10M, }, + { RATE_100M, RTE_ETH_SPEED_NUM_100M, }, + { RATE_1G, RTE_ETH_SPEED_NUM_1G, }, + { RATE_10G, RTE_ETH_SPEED_NUM_10G, }, + { RATE_25G, RTE_ETH_SPEED_NUM_25G, }, }; -static unsigned int +static uint32_t nfp_eth_rate2speed(enum nfp_eth_rate rate) { - int i; + uint32_t i; - for (i = 0; i < (int)RTE_DIM(nsp_eth_rate_tbl); i++) + for (i = 0; i < RTE_DIM(nsp_eth_rate_tbl); i++) if (nsp_eth_rate_tbl[i].rate == rate) return nsp_eth_rate_tbl[i].speed; return 0; } -static unsigned int -nfp_eth_speed2rate(unsigned int speed) +static enum nfp_eth_rate +nfp_eth_speed2rate(uint32_t speed) { - int i; + uint32_t i; - for (i = 0; i < (int)RTE_DIM(nsp_eth_rate_tbl); i++) + for (i = 0; i < RTE_DIM(nsp_eth_rate_tbl); i++) if (nsp_eth_rate_tbl[i].speed == speed) return nsp_eth_rate_tbl[i].rate; @@ -159,21 +117,25 @@ nfp_eth_speed2rate(unsigned int speed) } static void -nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src) 
+nfp_eth_copy_mac_reverse(uint8_t *dst, + const uint8_t *src) { - int i; + uint32_t i; for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) dst[RTE_ETHER_ADDR_LEN - i - 1] = src[i]; } static void -nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src, - unsigned int index, struct nfp_eth_table_port *dst) +nfp_eth_port_translate(struct nfp_nsp *nsp, + const union eth_table_entry *src, + uint32_t index, + struct nfp_eth_table_port *dst) { - unsigned int rate; - unsigned int fec; - uint64_t port, state; + uint32_t fec; + uint64_t port; + uint32_t rate; + uint64_t state; port = rte_le_to_cpu_64(src->port); state = rte_le_to_cpu_64(src->state); @@ -212,36 +174,46 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src, dst->fec_modes_supported |= fec << NFP_FEC_BASER_BIT; fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_RS, port); dst->fec_modes_supported |= fec << NFP_FEC_REED_SOLOMON_BIT; - if (dst->fec_modes_supported) + if (dst->fec_modes_supported != 0) dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED; - dst->fec = 1 << FIELD_GET(NSP_ETH_STATE_FEC, state); + dst->fec = FIELD_GET(NSP_ETH_STATE_FEC, state); + dst->act_fec = dst->fec; + + if (nfp_nsp_get_abi_ver_minor(nsp) < 33) + return; + + dst->act_fec = FIELD_GET(NSP_ETH_STATE_ACT_FEC, state); + dst->supp_aneg = FIELD_GET(NSP_ETH_PORT_SUPP_ANEG, port); } static void nfp_eth_calc_port_geometry(struct nfp_eth_table *table) { - unsigned int i, j; + uint32_t i; + uint32_t j; for (i = 0; i < table->count; i++) { table->max_index = RTE_MAX(table->max_index, - table->ports[i].index); + table->ports[i].index); for (j = 0; j < table->count; j++) { if (table->ports[i].label_port != - table->ports[j].label_port) + table->ports[j].label_port) continue; + table->ports[i].port_lanes += table->ports[j].lanes; if (i == j) continue; + if (table->ports[i].label_subport == - table->ports[j].label_subport) + table->ports[j].label_subport) PMD_DRV_LOG(DEBUG, "Port %d subport %d is a duplicate", - table->ports[i].label_port, - table->ports[i].label_subport); + table->ports[i].label_port, + table->ports[i].label_subport); - table->ports[i].is_split = 1; + table->ports[i].is_split = true; } } } @@ -252,6 +224,9 @@ nfp_eth_calc_port_type(struct nfp_eth_table_port *entry) if (entry->interface == NFP_INTERFACE_NONE) { entry->port_type = PORT_NONE; return; + } else if (entry->interface == NFP_INTERFACE_RJ45) { + entry->port_type = PORT_TP; + return; } if (entry->media == NFP_MEDIA_FIBRE) @@ -261,13 +236,15 @@ nfp_eth_calc_port_type(struct nfp_eth_table_port *entry) } static struct nfp_eth_table * -__nfp_eth_read_ports(struct nfp_nsp *nsp) +nfp_eth_read_ports_real(struct nfp_nsp *nsp) { - union eth_table_entry *entries; - struct nfp_eth_table *table; + int ret; + uint32_t i; + uint32_t j; + int cnt = 0; uint32_t table_sz; - int i, j, ret, cnt = 0; - const struct rte_ether_addr *mac; + struct nfp_eth_table *table; + union eth_table_entry *entries; entries = malloc(NSP_ETH_TABLE_SIZE); if (entries == NULL) @@ -276,27 +253,22 @@ __nfp_eth_read_ports(struct nfp_nsp *nsp) memset(entries, 0, NSP_ETH_TABLE_SIZE); ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); if (ret < 0) { - PMD_DRV_LOG(ERR, "reading port table failed %d", ret); + PMD_DRV_LOG(ERR, "Reading port table failed %d", ret); goto err; } - /* The NFP3800 NIC support 8 ports, but only 2 ports are valid, - * the rest 6 ports mac are all 0, ensure we don't use these port - */ - for (i = 0; i < NSP_ETH_MAX_COUNT; i++) { - mac = (const struct rte_ether_addr 
*)entries[i].mac_addr; - if ((entries[i].port & NSP_ETH_PORT_LANES_MASK) && - !rte_is_zero_ether_addr(mac)) + for (i = 0; i < NSP_ETH_MAX_COUNT; i++) + if ((entries[i].port & NSP_ETH_PORT_LANES_MASK) != 0) cnt++; - } - /* Some versions of flash will give us 0 instead of port count. For + /* + * Some versions of flash will give us 0 instead of port count. For * those that give a port count, verify it against the value calculated * above. */ - if (ret && ret != cnt) { - PMD_DRV_LOG(ERR, "table entry count (%d) unmatch entries present (%d)", - ret, cnt); + if (ret != 0 && ret != cnt) { + PMD_DRV_LOG(ERR, "Table entry count (%d) unmatch entries present (%d)", + ret, cnt); goto err; } @@ -308,15 +280,12 @@ __nfp_eth_read_ports(struct nfp_nsp *nsp) memset(table, 0, table_sz); table->count = cnt; for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++) { - mac = (const struct rte_ether_addr *)entries[i].mac_addr; - if ((entries[i].port & NSP_ETH_PORT_LANES_MASK) && - !rte_is_zero_ether_addr(mac)) - nfp_eth_port_translate(nsp, &entries[i], i, - &table->ports[j++]); + if ((entries[i].port & NSP_ETH_PORT_LANES_MASK) != 0) + nfp_eth_port_translate(nsp, &entries[i], i, &table->ports[j++]); } nfp_eth_calc_port_geometry(table); - for (i = 0; i < (int)table->count; i++) + for (i = 0; i < table->count; i++) nfp_eth_calc_port_type(&table->ports[i]); free(entries); @@ -328,37 +297,40 @@ __nfp_eth_read_ports(struct nfp_nsp *nsp) return NULL; } -/* - * nfp_eth_read_ports() - retrieve port information - * @cpp: NFP CPP handle +/** + * Read the port information from the device. * - * Read the port information from the device. Returned structure should - * be freed with kfree() once no longer needed. + * Returned structure should be freed once no longer needed. * - * Return: populated ETH table or NULL on error. + * @param cpp + * NFP CPP handle + * + * @return + * Populated ETH table or NULL on error. */ struct nfp_eth_table * nfp_eth_read_ports(struct nfp_cpp *cpp) { - struct nfp_eth_table *ret; struct nfp_nsp *nsp; + struct nfp_eth_table *ret; nsp = nfp_nsp_open(cpp); if (nsp == NULL) return NULL; - ret = __nfp_eth_read_ports(nsp); + ret = nfp_eth_read_ports_real(nsp); nfp_nsp_close(nsp); return ret; } struct nfp_nsp * -nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx) +nfp_eth_config_start(struct nfp_cpp *cpp, + uint32_t idx) { - union eth_table_entry *entries; - struct nfp_nsp *nsp; int ret; + struct nfp_nsp *nsp; + union eth_table_entry *entries; entries = malloc(NSP_ETH_TABLE_SIZE); if (entries == NULL) @@ -373,12 +345,12 @@ nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx) ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); if (ret < 0) { - PMD_DRV_LOG(ERR, "reading port table failed %d", ret); + PMD_DRV_LOG(ERR, "Reading port table failed %d", ret); goto err; } if ((entries[idx].port & NSP_ETH_PORT_LANES_MASK) == 0) { - PMD_DRV_LOG(ERR, "trying to set port state on disabled port %d", idx); + PMD_DRV_LOG(ERR, "Trying to set port state on disabled port %d", idx); goto err; } @@ -402,25 +374,25 @@ nfp_eth_config_cleanup_end(struct nfp_nsp *nsp) free(entries); } -/* - * nfp_eth_config_commit_end() - perform recorded configuration changes - * @nsp: NFP NSP handle returned from nfp_eth_config_start() - * +/** * Perform the configuration which was requested with __nfp_eth_set_*() - * helpers and recorded in @nsp state. If device was already configured - * as requested or no __nfp_eth_set_*() operations were made no NSP command + * helpers and recorded in @nsp state. 
If device was already configured + * as requested or no __nfp_eth_set_*() operations were made, no NSP command * will be performed. * - * Return: - * 0 - configuration successful; - * 1 - no changes were needed; - * -ERRNO - configuration failed. + * @param nsp + * NFP NSP handle returned from nfp_eth_config_start() + * + * @return + * - (0) Configuration successful + * - (1) No changes were needed + * - (-ERRNO) Configuration failed */ int nfp_eth_config_commit_end(struct nfp_nsp *nsp) { - union eth_table_entry *entries = nfp_nsp_config_entries(nsp); int ret = 1; + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); if (nfp_nsp_config_modified(nsp)) { ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); @@ -432,30 +404,34 @@ nfp_eth_config_commit_end(struct nfp_nsp *nsp) return ret; } -/* - * nfp_eth_set_mod_enable() - set PHY module enable control bit - * @cpp: NFP CPP handle - * @idx: NFP chip-wide port index - * @enable: Desired state - * +/** * Enable or disable PHY module (this usually means setting the TX lanes * disable bits). * - * Return: - * 0 - configuration successful; - * 1 - no changes were needed; - * -ERRNO - configuration failed. + * @param cpp + * NFP CPP handle + * @param idx + * NFP chip-wide port index + * @param enable + * Desired state + * + * @return + * - (0) Configuration successful + * - (1) No changes were needed + * - (-ERRNO) Configuration failed */ int -nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable) +nfp_eth_set_mod_enable(struct nfp_cpp *cpp, + uint32_t idx, + bool enable) { - union eth_table_entry *entries; - struct nfp_nsp *nsp; uint64_t reg; + struct nfp_nsp *nsp; + union eth_table_entry *entries; nsp = nfp_eth_config_start(cpp, idx); if (nsp == NULL) - return -1; + return -EIO; entries = nfp_nsp_config_entries(nsp); @@ -467,31 +443,35 @@ nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable) reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable); entries[idx].control = rte_cpu_to_le_64(reg); - nfp_nsp_config_set_modified(nsp, 1); + nfp_nsp_config_set_modified(nsp, true); } return nfp_eth_config_commit_end(nsp); } -/* - * nfp_eth_set_configured() - set PHY module configured control bit - * @cpp: NFP CPP handle - * @idx: NFP chip-wide port index - * @configed: Desired state - * +/** * Set the ifup/ifdown state on the PHY. * - * Return: - * 0 - configuration successful; - * 1 - no changes were needed; - * -ERRNO - configuration failed. 
+ * @param cpp + * NFP CPP handle + * @param idx + * NFP chip-wide port index + * @param configured + * Desired state + * + * @return + * - (0) Configuration successful + * - (1) No changes were needed + * - (-ERRNO) Configuration failed */ int -nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, int configed) +nfp_eth_set_configured(struct nfp_cpp *cpp, + uint32_t idx, + bool configured) { - union eth_table_entry *entries; - struct nfp_nsp *nsp; uint64_t reg; + struct nfp_nsp *nsp; + union eth_table_entry *entries; nsp = nfp_eth_config_start(cpp, idx); if (nsp == NULL) @@ -510,30 +490,33 @@ nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, int configed) /* Check if we are already in requested state */ reg = rte_le_to_cpu_64(entries[idx].state); - if (configed != (int)FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) { + if (configured != (int)FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) { reg = rte_le_to_cpu_64(entries[idx].control); reg &= ~NSP_ETH_CTRL_CONFIGURED; - reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configed); + reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configured); entries[idx].control = rte_cpu_to_le_64(reg); - nfp_nsp_config_set_modified(nsp, 1); + nfp_nsp_config_set_modified(nsp, true); } return nfp_eth_config_commit_end(nsp); } static int -nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx, - const uint64_t mask, const unsigned int shift, - unsigned int val, const uint64_t ctrl_bit) +nfp_eth_set_bit_config(struct nfp_nsp *nsp, + uint32_t raw_idx, + const uint64_t mask, + const uint32_t shift, + uint32_t val, + const uint64_t ctrl_bit) { - union eth_table_entry *entries = nfp_nsp_config_entries(nsp); - unsigned int idx = nfp_nsp_config_idx(nsp); uint64_t reg; + uint32_t idx = nfp_nsp_config_idx(nsp); + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); /* * Note: set features were added in ABI 0.14 but the error - * codes were initially not populated correctly. + * codes were initially not populated correctly. */ if (nfp_nsp_get_abi_ver_minor(nsp) < 17) { PMD_DRV_LOG(ERR, "set operations not supported, please update flash"); @@ -551,77 +534,87 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx, entries[idx].control |= rte_cpu_to_le_64(ctrl_bit); - nfp_nsp_config_set_modified(nsp, 1); + nfp_nsp_config_set_modified(nsp, true); return 0; } -#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \ - (__extension__ ({ \ - typeof(mask) _x = (mask); \ +#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \ + (__extension__ ({ \ + typeof(mask) _x = (mask); \ nfp_eth_set_bit_config(nsp, raw_idx, _x, __bf_shf(_x), \ - val, ctrl_bit); \ + val, ctrl_bit); \ })) -/* - * __nfp_eth_set_aneg() - set PHY autonegotiation control bit - * @nsp: NFP NSP handle returned from nfp_eth_config_start() - * @mode: Desired autonegotiation mode - * +/** * Allow/disallow PHY module to advertise/perform autonegotiation. * Will write to hwinfo overrides in the flash (persistent config). * - * Return: 0 or -ERRNO. 
+ * @param nsp + * NFP NSP handle returned from nfp_eth_config_start() + * @param mode + * Desired autonegotiation mode + * + * @return + * 0 or -ERRNO */ int -__nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode) +nfp_eth_set_aneg(struct nfp_nsp *nsp, + enum nfp_eth_aneg mode) { return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, - NSP_ETH_STATE_ANEG, mode, - NSP_ETH_CTRL_SET_ANEG); + NSP_ETH_STATE_ANEG, mode, NSP_ETH_CTRL_SET_ANEG); } -/* - * __nfp_eth_set_fec() - set PHY forward error correction control bit - * @nsp: NFP NSP handle returned from nfp_eth_config_start() - * @mode: Desired fec mode - * +/** * Set the PHY module forward error correction mode. * Will write to hwinfo overrides in the flash (persistent config). * - * Return: 0 or -ERRNO. + * @param nsp + * NFP NSP handle returned from nfp_eth_config_start() + * @param mode + * Desired fec mode + * + * @return + * 0 or -ERRNO */ static int -__nfp_eth_set_fec(struct nfp_nsp *nsp, enum nfp_eth_fec mode) +nfp_eth_set_fec_real(struct nfp_nsp *nsp, + enum nfp_eth_fec mode) { return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, - NSP_ETH_STATE_FEC, mode, - NSP_ETH_CTRL_SET_FEC); + NSP_ETH_STATE_FEC, mode, NSP_ETH_CTRL_SET_FEC); } -/* - * nfp_eth_set_fec() - set PHY forward error correction control mode - * @cpp: NFP CPP handle - * @idx: NFP chip-wide port index - * @mode: Desired fec mode +/** + * Set PHY forward error correction control mode * - * Return: - * 0 - configuration successful; - * 1 - no changes were needed; - * -ERRNO - configuration failed. + * @param cpp + * NFP CPP handle + * @param idx + * NFP chip-wide port index + * @param mode + * Desired fec mode + * + * @return + * - (0) Configuration successful + * - (1) No changes were needed + * - (-ERRNO) Configuration failed */ int -nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode) +nfp_eth_set_fec(struct nfp_cpp *cpp, + uint32_t idx, + enum nfp_eth_fec mode) { - struct nfp_nsp *nsp; int err; + struct nfp_nsp *nsp; nsp = nfp_eth_config_start(cpp, idx); if (nsp == NULL) return -EIO; - err = __nfp_eth_set_fec(nsp, mode); - if (err) { + err = nfp_eth_set_fec_real(nsp, mode); + if (err != 0) { nfp_eth_config_cleanup_end(nsp); return err; } @@ -629,47 +622,52 @@ nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode) return nfp_eth_config_commit_end(nsp); } -/* - * __nfp_eth_set_speed() - set interface speed/rate - * @nsp: NFP NSP handle returned from nfp_eth_config_start() - * @speed: Desired speed (per lane) - * - * Set lane speed. Provided @speed value should be subport speed divided - * by number of lanes this subport is spanning (i.e. 10000 for 40G, 25000 for - * 50G, etc.) +/** + * Set lane speed. + * Provided @speed value should be subport speed divided by number of + * lanes this subport is spanning (i.e. 10000 for 40G, 25000 for 50G, etc.) * Will write to hwinfo overrides in the flash (persistent config). * - * Return: 0 or -ERRNO. 
+ * @param nsp + * NFP NSP handle returned from nfp_eth_config_start() + * @param speed + * Desired speed (per lane) + * + * @return + * 0 or -ERRNO */ int -__nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed) +nfp_eth_set_speed(struct nfp_nsp *nsp, + uint32_t speed) { enum nfp_eth_rate rate; rate = nfp_eth_speed2rate(speed); if (rate == RATE_INVALID) { - PMD_DRV_LOG(ERR, "could not find matching lane rate for speed %u", speed); + PMD_DRV_LOG(ERR, "Could not find matching lane rate for speed %u", speed); return -EINVAL; } return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, - NSP_ETH_STATE_RATE, rate, - NSP_ETH_CTRL_SET_RATE); + NSP_ETH_STATE_RATE, rate, NSP_ETH_CTRL_SET_RATE); } -/* - * __nfp_eth_set_split() - set interface lane split - * @nsp: NFP NSP handle returned from nfp_eth_config_start() - * @lanes: Desired lanes per port - * +/** * Set number of lanes in the port. * Will write to hwinfo overrides in the flash (persistent config). * - * Return: 0 or -ERRNO. + * @param nsp + * NFP NSP handle returned from nfp_eth_config_start() + * @param lanes + * Desired lanes per port + * + * @return + * 0 or -ERRNO */ int -__nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes) +nfp_eth_set_split(struct nfp_nsp *nsp, + uint32_t lanes) { - return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES, - lanes, NSP_ETH_CTRL_SET_LANES); + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, + NSP_ETH_PORT_LANES, lanes, NSP_ETH_CTRL_SET_LANES); } diff --git a/drivers/net/nfp/nfpcore/nfp_platform.h b/drivers/net/nfp/nfpcore/nfp_platform.h new file mode 100644 index 00000000000..1687942e418 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_platform.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Corigine, Inc. + * All rights reserved. + */ + +#ifndef __NFP_PLATFORM_H__ +#define __NFP_PLATFORM_H__ + +#include + +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) + +#define DMA_BIT_MASK(n) ((1ULL << (n)) - 1) + +#define BITS_PER_LONG (__SIZEOF_LONG__ * 8) +#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8) + +#define GENMASK(h, l) \ + ((~0UL << (l)) & (~0UL >> (BITS_PER_LONG - (h) - 1))) + +#define GENMASK_ULL(h, l) \ + ((~0ULL << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - (h) - 1))) + +#define __bf_shf(x) (__builtin_ffsll(x) - 1) + +#define FIELD_GET(_mask, _reg) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + (typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \ + })) + +#define FIELD_FIT(_mask, _val) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + !((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \ + })) + +#define FIELD_PREP(_mask, _val) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + ((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \ + })) + +#endif /* __NFP_PLATFORM_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp_resource.c b/drivers/net/nfp/nfpcore/nfp_resource.c index 351bc623edb..d59d2d6c1e1 100644 --- a/drivers/net/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/nfp/nfpcore/nfp_resource.c @@ -3,57 +3,45 @@ * All rights reserved. 
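The new nfp_platform.h mirrors the kernel-style GENMASK/FIELD_GET/FIELD_PREP helpers that the NSP code above relies on (for example, NFP_ETH_SET_BIT_CONFIG derives the shift from the mask via __bf_shf). A standalone sketch of how they compose; EXAMPLE_SPEED_MASK is a made-up field, not a real NFP register layout:

#include <stdint.h>
#include <stdio.h>

/* Same definitions as nfp_platform.h, repeated so the sketch builds on its own. */
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#define GENMASK(h, l) \
	((~0UL << (l)) & (~0UL >> (BITS_PER_LONG - (h) - 1)))
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
#define FIELD_GET(_mask, _reg) \
	(__extension__ ({ \
		typeof(_mask) _x = (_mask); \
		(typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \
	}))
#define FIELD_PREP(_mask, _val) \
	(__extension__ ({ \
		typeof(_mask) _x = (_mask); \
		((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \
	}))

#define EXAMPLE_SPEED_MASK GENMASK(11, 8)	/* hypothetical 4-bit field at bits 11:8 */

int main(void)
{
	unsigned long reg = 0;

	/* Pack value 5 into bits 11:8, then extract it again. */
	reg |= FIELD_PREP(EXAMPLE_SPEED_MASK, 0x5UL);
	printf("reg=%#lx field=%#lx\n", reg, FIELD_GET(EXAMPLE_SPEED_MASK, reg));
	return 0;
}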
*/ -#include -#include -#include - -#include "nfp_cpp.h" -#include "nfp_logs.h" -#include "nfp6000/nfp6000.h" #include "nfp_resource.h" + #include "nfp_crc.h" +#include "nfp_logs.h" +#include "nfp_mutex.h" +#include "nfp_target.h" -#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU -#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL +#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU +#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL /* NFP Resource Table self-identifier */ -#define NFP_RESOURCE_TBL_NAME "nfp.res" -#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */ - -#define NFP_RESOURCE_ENTRY_NAME_SZ 8 - -/* - * struct nfp_resource_entry - Resource table entry - * @owner: NFP CPP Lock, interface owner - * @key: NFP CPP Lock, posix_crc32(name, 8) - * @region: Memory region descriptor - * @name: ASCII, zero padded name - * @reserved - * @cpp_action: CPP Action - * @cpp_token: CPP Token - * @cpp_target: CPP Target ID - * @page_offset: 256-byte page offset into target's CPP address - * @page_size: size, in 256-byte pages - */ +#define NFP_RESOURCE_TBL_NAME "nfp.res" +#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */ + +#define NFP_RESOURCE_ENTRY_NAME_SZ 8 + +/* Resource table entry */ struct nfp_resource_entry { struct nfp_resource_entry_mutex { - uint32_t owner; - uint32_t key; + uint32_t owner; /**< NFP CPP Lock, interface owner */ + uint32_t key; /**< NFP CPP Lock, posix_crc32(name, 8) */ } mutex; + /* Memory region descriptor */ struct nfp_resource_entry_region { + /** ASCII, zero padded name */ uint8_t name[NFP_RESOURCE_ENTRY_NAME_SZ]; uint8_t reserved[5]; - uint8_t cpp_action; - uint8_t cpp_token; - uint8_t cpp_target; + uint8_t cpp_action; /**< CPP Action */ + uint8_t cpp_token; /**< CPP Token */ + uint8_t cpp_target; /**< CPP Target ID */ + /** 256-byte page offset into target's CPP address */ uint32_t page_offset; - uint32_t page_size; + uint32_t page_size; /**< Size, in 256-byte pages */ } region; }; -#define NFP_RESOURCE_TBL_SIZE 4096 -#define NFP_RESOURCE_TBL_ENTRIES (int)(NFP_RESOURCE_TBL_SIZE / \ - sizeof(struct nfp_resource_entry)) +#define NFP_RESOURCE_TBL_SIZE 4096 +#define NFP_RESOURCE_TBL_ENTRIES (NFP_RESOURCE_TBL_SIZE / \ + sizeof(struct nfp_resource_entry)) struct nfp_resource { char name[NFP_RESOURCE_ENTRY_NAME_SZ + 1]; @@ -64,12 +52,15 @@ struct nfp_resource { }; static int -nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) +nfp_cpp_resource_find(struct nfp_cpp *cpp, + struct nfp_resource *res) { - char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ + 2]; + int ret; + uint32_t i; + uint32_t key; + uint32_t cpp_id; struct nfp_resource_entry entry; - uint32_t cpp_id, key; - int ret, i; + char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ + 2]; cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */ @@ -81,11 +72,12 @@ nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) PMD_DRV_LOG(ERR, "Grabbing device lock not supported"); return -EOPNOTSUPP; } + key = nfp_crc32_posix(name_pad, NFP_RESOURCE_ENTRY_NAME_SZ); for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) { uint64_t addr = NFP_RESOURCE_TBL_BASE + - sizeof(struct nfp_resource_entry) * i; + sizeof(struct nfp_resource_entry) * i; ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry)); if (ret != sizeof(entry)) @@ -95,14 +87,14 @@ nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) continue; /* Found key! 
*/ - res->mutex = - nfp_cpp_mutex_alloc(cpp, - NFP_RESOURCE_TBL_TARGET, addr, key); + res->mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET, + addr, key); res->cpp_id = NFP_CPP_ID(entry.region.cpp_target, - entry.region.cpp_action, - entry.region.cpp_token); + entry.region.cpp_action, + entry.region.cpp_token); res->addr = ((uint64_t)entry.region.page_offset) << 8; res->size = (uint64_t)entry.region.page_size << 8; + return 0; } @@ -110,21 +102,28 @@ nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) } static int -nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res, - struct nfp_cpp_mutex *dev_mutex) +nfp_resource_try_acquire(struct nfp_cpp *cpp, + struct nfp_resource *res, + struct nfp_cpp_mutex *dev_mutex) { int err; - if (nfp_cpp_mutex_lock(dev_mutex)) + if (nfp_cpp_mutex_lock(dev_mutex) != 0) { + PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex lock failed"); return -EINVAL; + } err = nfp_cpp_resource_find(cpp, res); - if (err) + if (err != 0) { + PMD_DRV_LOG(ERR, "RESOURCE - CPP resource find failed"); goto err_unlock_dev; + } err = nfp_cpp_mutex_trylock(res->mutex); - if (err) + if (err != 0) { + PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex trylock failed"); goto err_res_mutex_free; + } nfp_cpp_mutex_unlock(dev_mutex); @@ -138,23 +137,28 @@ nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res, return err; } -/* - * nfp_resource_acquire() - Acquire a resource handle - * @cpp: NFP CPP handle - * @name: Name of the resource +/** + * Acquire a resource handle * - * NOTE: This function locks the acquired resource + * Note: This function locks the acquired resource. * - * Return: NFP Resource handle, or NULL + * @param cpp + * NFP CPP handle + * @param name + * Name of the resource + * + * @return + * NFP Resource handle, or NULL */ struct nfp_resource * -nfp_resource_acquire(struct nfp_cpp *cpp, const char *name) +nfp_resource_acquire(struct nfp_cpp *cpp, + const char *name) { - struct nfp_cpp_mutex *dev_mutex; - struct nfp_resource *res; int err; + uint16_t count = 0; struct timespec wait; - int count; + struct nfp_resource *res; + struct nfp_cpp_mutex *dev_mutex; res = malloc(sizeof(*res)); if (res == NULL) @@ -165,28 +169,27 @@ nfp_resource_acquire(struct nfp_cpp *cpp, const char *name) strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ); dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET, - NFP_RESOURCE_TBL_BASE, - NFP_RESOURCE_TBL_KEY); + NFP_RESOURCE_TBL_BASE, NFP_RESOURCE_TBL_KEY); if (dev_mutex == NULL) { - free(res); - return NULL; + PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex alloc failed"); + goto err_free; } wait.tv_sec = 0; - wait.tv_nsec = 1000000; - count = 0; + wait.tv_nsec = 1000000; /* 1ms */ for (;;) { err = nfp_resource_try_acquire(cpp, res, dev_mutex); if (err == 0) break; - if (err != -EBUSY) - goto err_free; + if (err != -EBUSY) { + PMD_DRV_LOG(ERR, "RESOURCE - try acquire failed"); + goto mutex_free; + } - if (count++ > 1000) { + if (count++ > 1000) { /* 1ms * 1000 = 1s */ PMD_DRV_LOG(ERR, "Error: resource %s timed out", name); - err = -EBUSY; - goto err_free; + goto mutex_free; } nanosleep(&wait, NULL); @@ -196,17 +199,20 @@ nfp_resource_acquire(struct nfp_cpp *cpp, const char *name) return res; -err_free: +mutex_free: nfp_cpp_mutex_free(dev_mutex); +err_free: free(res); return NULL; } -/* - * nfp_resource_release() - Release a NFP Resource handle - * @res: NFP Resource handle +/** + * Release a NFP Resource handle * - * NOTE: This function implicitly unlocks the resource handle + * NOTE: This function 
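The reworked nfp_resource_acquire() above polls nfp_resource_try_acquire() every 1 ms and gives up after roughly one second. A standalone sketch of that retry shape, with try_acquire_stub() as a hypothetical, hardware-free stand-in for the CPP call:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Pretend the resource is busy for a few attempts, then becomes available. */
static int try_acquire_stub(int *attempts_left)
{
	return (--(*attempts_left) > 0) ? -EBUSY : 0;
}

int main(void)
{
	int err;
	int busy_for = 5;
	uint16_t count = 0;
	struct timespec wait = { .tv_sec = 0, .tv_nsec = 1000000 }; /* 1 ms */

	for (;;) {
		err = try_acquire_stub(&busy_for);
		if (err == 0)
			break;
		if (err != -EBUSY)
			return err;
		if (count++ > 1000) {	/* 1 ms * 1000 = ~1 s timeout */
			fprintf(stderr, "resource acquire timed out\n");
			return -EBUSY;
		}
		nanosleep(&wait, NULL);
	}
	printf("acquired after %u retries\n", count);
	return 0;
}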
implicitly unlocks the resource handle. + * + * @param res + * NFP Resource handle */ void nfp_resource_release(struct nfp_resource *res) @@ -216,11 +222,14 @@ nfp_resource_release(struct nfp_resource *res) free(res); } -/* - * nfp_resource_cpp_id() - Return the cpp_id of a resource handle - * @res: NFP Resource handle +/** + * Return the cpp_id of a resource handle + * + * @param res + * NFP Resource handle * - * Return: NFP CPP ID + * @return + * NFP CPP ID */ uint32_t nfp_resource_cpp_id(const struct nfp_resource *res) @@ -228,23 +237,29 @@ nfp_resource_cpp_id(const struct nfp_resource *res) return res->cpp_id; } -/* - * nfp_resource_name() - Return the name of a resource handle - * @res: NFP Resource handle +/** + * Return the name of a resource handle * - * Return: const char pointer to the name of the resource + * @param res + * NFP Resource handle + * + * @return + * Const char pointer to the name of the resource */ -const char -*nfp_resource_name(const struct nfp_resource *res) +const char * +nfp_resource_name(const struct nfp_resource *res) { return res->name; } -/* - * nfp_resource_address() - Return the address of a resource handle - * @res: NFP Resource handle +/** + * Return the address of a resource handle + * + * @param res + * NFP Resource handle * - * Return: Address of the resource + * @return + * Address of the resource */ uint64_t nfp_resource_address(const struct nfp_resource *res) @@ -252,11 +267,14 @@ nfp_resource_address(const struct nfp_resource *res) return res->addr; } -/* - * nfp_resource_size() - Return the size in bytes of a resource handle - * @res: NFP Resource handle +/** + * Return the size in bytes of a resource handle + * + * @param res + * NFP Resource handle * - * Return: Size of the resource in bytes + * @return + * Size of the resource in bytes */ uint64_t nfp_resource_size(const struct nfp_resource *res) diff --git a/drivers/net/nfp/nfpcore/nfp_resource.h b/drivers/net/nfp/nfpcore/nfp_resource.h index 06cc6f74f41..f49c99e462b 100644 --- a/drivers/net/nfp/nfpcore/nfp_resource.h +++ b/drivers/net/nfp/nfpcore/nfp_resource.h @@ -3,50 +3,34 @@ * All rights reserved. 
*/ -#ifndef NFP_RESOURCE_H -#define NFP_RESOURCE_H +#ifndef __NFP_RESOURCE_H__ +#define __NFP_RESOURCE_H__ #include "nfp_cpp.h" +/* Netronone Flow Firmware Table */ #define NFP_RESOURCE_NFP_NFFW "nfp.nffw" + +/* NFP Hardware Info Database */ #define NFP_RESOURCE_NFP_HWINFO "nfp.info" -#define NFP_RESOURCE_NSP "nfp.sp" -/** - * Opaque handle to a NFP Resource - */ +/* Service Processor */ +#define NFP_RESOURCE_NSP "nfp.sp" + +/* Opaque handle to a NFP Resource */ struct nfp_resource; struct nfp_resource *nfp_resource_acquire(struct nfp_cpp *cpp, - const char *name); + const char *name); -/** - * Release a NFP Resource, and free the handle - * @param[in] res NFP Resource handle - */ void nfp_resource_release(struct nfp_resource *res); -/** - * Return the CPP ID of a NFP Resource - * @param[in] res NFP Resource handle - * @return CPP ID of the NFP Resource - */ uint32_t nfp_resource_cpp_id(const struct nfp_resource *res); -/** - * Return the name of a NFP Resource - * @param[in] res NFP Resource handle - * @return Name of the NFP Resource - */ const char *nfp_resource_name(const struct nfp_resource *res); -/** - * Return the target address of a NFP Resource - * @param[in] res NFP Resource handle - * @return Address of the NFP Resource - */ uint64_t nfp_resource_address(const struct nfp_resource *res); uint64_t nfp_resource_size(const struct nfp_resource *res); -#endif /* NFP_RESOURCE_H */ +#endif /* __NFP_RESOURCE_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp_rtsym.c b/drivers/net/nfp/nfpcore/nfp_rtsym.c index 343b0d0bcf5..5cefbace963 100644 --- a/drivers/net/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/nfp/nfpcore/nfp_rtsym.c @@ -8,30 +8,66 @@ * Interface for accessing run-time symbol table */ -#include +#include "nfp_rtsym.h" + #include -#include "nfp_cpp.h" + #include "nfp_logs.h" #include "nfp_mip.h" -#include "nfp_rtsym.h" +#include "nfp_target.h" #include "nfp6000/nfp6000.h" +enum nfp_rtsym_type { + NFP_RTSYM_TYPE_NONE, + NFP_RTSYM_TYPE_OBJECT, + NFP_RTSYM_TYPE_FUNCTION, + NFP_RTSYM_TYPE_ABS, +}; + +#define NFP_RTSYM_TARGET_NONE 0 +#define NFP_RTSYM_TARGET_LMEM -1 +#define NFP_RTSYM_TARGET_EMU_CACHE -7 + /* These need to match the linker */ -#define SYM_TGT_LMEM 0 -#define SYM_TGT_EMU_CACHE 0x17 +#define SYM_TGT_LMEM 0 +#define SYM_TGT_EMU_CACHE 0x17 struct nfp_rtsym_entry { - uint8_t type; - uint8_t target; - uint8_t island; - uint8_t addr_hi; + uint8_t type; + uint8_t target; + uint8_t island; + uint8_t addr_hi; uint32_t addr_lo; uint16_t name; - uint8_t menum; - uint8_t size_hi; + uint8_t menum; + uint8_t size_hi; uint32_t size_lo; }; +/* + * Structure describing a run-time NFP symbol. + * + * The memory target of the symbol is generally the CPP target number and can be + * used directly by the nfp_cpp API calls. However, in some cases (i.e., for + * local memory or control store) the target is encoded using a negative number. + * + * When the target type can not be used to fully describe the location of a + * symbol the domain field is used to further specify the location (i.e., the + * specific ME or island number). + * + * For ME target resources, 'domain' is an MEID. + * For Island target resources, 'domain' is an island ID, with the one exception + * of "sram" symbols for backward compatibility, which are viewed as global. 
+ */ +struct nfp_rtsym { + const char *name; /**< Symbol name */ + uint64_t addr; /**< Address in the domain/target's address space */ + uint64_t size; /**< Size (in bytes) of the symbol */ + enum nfp_rtsym_type type; /**< NFP_RTSYM_TYPE_* of the symbol */ + int target; /**< CPP target identifier, or NFP_RTSYM_TARGET_* */ + int domain; /**< CPP target domain */ +}; + struct nfp_rtsym_table { struct nfp_cpp *cpp; int num; @@ -40,25 +76,26 @@ struct nfp_rtsym_table { }; static int -nfp_meid(uint8_t island_id, uint8_t menum) +nfp_meid(uint8_t island_id, + uint8_t menum) { return (island_id & 0x3F) == island_id && menum < 12 ? (island_id << 4) | (menum + 4) : -1; } static void -nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, uint32_t strtab_size, - struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw) +nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, + uint32_t strtab_size, + struct nfp_rtsym *sw, + struct nfp_rtsym_entry *fw) { sw->type = fw->type; sw->name = cache->strtab + rte_le_to_cpu_16(fw->name) % strtab_size; sw->addr = ((uint64_t)fw->addr_hi << 32) | - rte_le_to_cpu_32(fw->addr_lo); + rte_le_to_cpu_32(fw->addr_lo); sw->size = ((uint64_t)fw->size_hi << 32) | - rte_le_to_cpu_32(fw->size_lo); + rte_le_to_cpu_32(fw->size_lo); - PMD_INIT_LOG(DEBUG, "rtsym_entry_init name=%s, addr=%" PRIx64 ", size=%" PRIu64 ", target=%d", - sw->name, sw->addr, sw->size, sw->target); switch (fw->target) { case SYM_TGT_LMEM: sw->target = NFP_RTSYM_TARGET_LMEM; @@ -79,29 +116,22 @@ nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, uint32_t strtab_size, sw->domain = -1; } -struct nfp_rtsym_table * -nfp_rtsym_table_read(struct nfp_cpp *cpp) +static struct nfp_rtsym_table * +nfp_rtsym_table_read_real(struct nfp_cpp *cpp, + const struct nfp_mip *mip) { - struct nfp_rtsym_table *rtbl; - struct nfp_mip *mip; - - mip = nfp_mip_open(cpp); - rtbl = __nfp_rtsym_table_read(cpp, mip); - nfp_mip_close(mip); - - return rtbl; -} - -struct nfp_rtsym_table * -__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip) -{ - uint32_t strtab_addr, symtab_addr, strtab_size, symtab_size; - struct nfp_rtsym_entry *rtsymtab; + int n; + int err; + uint32_t size; + uint32_t strtab_addr; + uint32_t symtab_addr; + uint32_t strtab_size; + uint32_t symtab_size; struct nfp_rtsym_table *cache; + struct nfp_rtsym_entry *rtsymtab; const uint32_t dram = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) | NFP_ISL_EMEM0; - int err, n, size; if (mip == NULL) return NULL; @@ -142,7 +172,7 @@ __nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip) for (n = 0; n < cache->num; n++) nfp_rtsym_sw_entry_init(cache, strtab_size, - &cache->symtab[n], &rtsymtab[n]); + &cache->symtab[n], &rtsymtab[n]); free(rtsymtab); @@ -155,11 +185,27 @@ __nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip) return NULL; } -/* - * nfp_rtsym_count() - Get the number of RTSYM descriptors - * @rtbl: NFP RTsym table +struct nfp_rtsym_table * +nfp_rtsym_table_read(struct nfp_cpp *cpp) +{ + struct nfp_mip *mip; + struct nfp_rtsym_table *rtbl; + + mip = nfp_mip_open(cpp); + rtbl = nfp_rtsym_table_read_real(cpp, mip); + nfp_mip_close(mip); + + return rtbl; +} + +/** + * Get the number of RTSYM descriptors + * + * @param rtbl + * NFP RTSYM table * - * Return: Number of RTSYM descriptors + * @return + * Number of RTSYM descriptors */ int nfp_rtsym_count(struct nfp_rtsym_table *rtbl) @@ -170,15 +216,20 @@ nfp_rtsym_count(struct nfp_rtsym_table *rtbl) return rtbl->num; } -/* - * nfp_rtsym_get() - Get the Nth RTSYM descriptor - 
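nfp_meid() above packs a 6-bit island ID and a microengine number into a single MEID, rejecting out-of-range inputs. A standalone sketch of the same encoding, outside the driver:

#include <stdint.h>
#include <stdio.h>

/* Valid island (fits in 6 bits) and menum < 12 encode as (island << 4) | (menum + 4). */
static int meid(uint8_t island_id, uint8_t menum)
{
	return (island_id & 0x3F) == island_id && menum < 12 ?
	       (island_id << 4) | (menum + 4) : -1;
}

int main(void)
{
	printf("meid(32, 0)  = %#x\n", (unsigned int)meid(32, 0));	/* 0x204 */
	printf("meid(32, 12) = %d\n", meid(32, 12));			/* -1, menum out of range */
	return 0;
}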
* @rtbl: NFP RTsym table - * @idx: Index (0-based) of the RTSYM descriptor +/** + * Get the Nth RTSYM descriptor * - * Return: const pointer to a struct nfp_rtsym descriptor, or NULL + * @param rtbl + * NFP RTSYM table + * @param idx + * Index (0-based) of the RTSYM descriptor + * + * @return + * Const pointer to a struct nfp_rtsym descriptor, or NULL */ const struct nfp_rtsym * -nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx) +nfp_rtsym_get(struct nfp_rtsym_table *rtbl, + int idx) { if (rtbl == NULL) return NULL; @@ -189,15 +240,20 @@ nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx) return &rtbl->symtab[idx]; } -/* - * nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name - * @rtbl: NFP RTsym table - * @name: Symbol name +/** + * Return the RTSYM descriptor for a symbol name + * + * @param rtbl + * NFP RTSYM table + * @param name + * Symbol name * - * Return: const pointer to a struct nfp_rtsym descriptor, or NULL + * @return + * Const pointer to a struct nfp_rtsym descriptor, or NULL */ const struct nfp_rtsym * -nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name) +nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, + const char *name) { int n; @@ -216,15 +272,16 @@ nfp_rtsym_size(const struct nfp_rtsym *sym) { switch (sym->type) { case NFP_RTSYM_TYPE_NONE: - PMD_DRV_LOG(ERR, "rtsym '%s': type NONE", sym->name); + PMD_DRV_LOG(ERR, "The type of rtsym '%s' is NONE", sym->name); return 0; - case NFP_RTSYM_TYPE_OBJECT: /* Fall through */ + case NFP_RTSYM_TYPE_OBJECT: + /* FALLTHROUGH */ case NFP_RTSYM_TYPE_FUNCTION: return sym->size; case NFP_RTSYM_TYPE_ABS: return sizeof(uint64_t); default: - PMD_DRV_LOG(ERR, "rtsym '%s': unknown type: %d", sym->name, sym->type); + PMD_DRV_LOG(ERR, "Unknown RTSYM type %u", sym->type); return 0; } } @@ -266,7 +323,59 @@ nfp_rtsym_to_dest(struct nfp_cpp *cpp, } static int -nfp_rtsym_readl(struct nfp_cpp *cpp, +nfp_rtsym_read_real(struct nfp_cpp *cpp, + const struct nfp_rtsym *sym, + uint8_t action, + uint8_t token, + uint64_t offset, + void *buf, + size_t len) +{ + int err; + uint64_t addr; + uint32_t cpp_id; + size_t length = len; + uint64_t sym_size = nfp_rtsym_size(sym); + + if (offset >= sym_size) { + PMD_DRV_LOG(ERR, "rtsym '%s' read out of bounds", sym->name); + return -ENXIO; + } + + if (length > sym_size - offset) + length = sym_size - offset; + + if (sym->type == NFP_RTSYM_TYPE_ABS) { + union { + uint64_t value_64; + uint8_t value_8[8]; + } tmp; + + tmp.value_64 = sym->addr; + memcpy(buf, &tmp.value_8[offset], length); + + return length; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, offset, &cpp_id, &addr); + if (err != 0) + return err; + + return nfp_cpp_read(cpp, cpp_id, addr, buf, length); +} + +int +nfp_rtsym_read(struct nfp_cpp *cpp, + const struct nfp_rtsym *sym, + uint64_t offset, + void *buf, + size_t len) +{ + return nfp_rtsym_read_real(cpp, sym, NFP_CPP_ACTION_RW, 0, offset, buf, len); +} + +static int +nfp_rtsym_readl_real(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, uint8_t action, uint8_t token, @@ -289,8 +398,17 @@ nfp_rtsym_readl(struct nfp_cpp *cpp, return nfp_cpp_readl(cpp, cpp_id, addr, value); } +int +nfp_rtsym_readl(struct nfp_cpp *cpp, + const struct nfp_rtsym *sym, + uint64_t offset, + uint32_t *value) +{ + return nfp_rtsym_readl_real(cpp, sym, NFP_CPP_ACTION_RW, 0, offset, value); +} + static int -nfp_rtsym_readq(struct nfp_cpp *cpp, +nfp_rtsym_readq_real(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, uint8_t action, uint8_t token, @@ -318,25 +436,147 @@ nfp_rtsym_readq(struct 
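For NFP_RTSYM_TYPE_ABS symbols, nfp_rtsym_read_real() above returns bytes of the symbol's 64-bit address itself instead of issuing a CPP read. A standalone sketch of that path, with abs_sym_read() as a hypothetical, hardware-free stand-in:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* An ABS symbol carries its value in the address field; copy bytes out of a local union. */
static int abs_sym_read(uint64_t sym_addr, uint64_t offset, void *buf, size_t len)
{
	union {
		uint64_t value_64;
		uint8_t value_8[8];
	} tmp;

	if (offset >= sizeof(tmp))
		return -1;
	if (len > sizeof(tmp) - offset)
		len = sizeof(tmp) - offset;

	tmp.value_64 = sym_addr;
	memcpy(buf, &tmp.value_8[offset], len);
	return (int)len;
}

int main(void)
{
	uint32_t low;

	/* Read the low 4 bytes of a hypothetical ABS symbol value. */
	abs_sym_read(0x1122334455667788ULL, 0, &low, sizeof(low));
	printf("%#x\n", low);
	return 0;
}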
nfp_cpp *cpp, return nfp_cpp_readq(cpp, cpp_id, addr, value); } -/* - * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol - * @rtbl: NFP RTsym table - * @name: Symbol name - * @error: Pointer to error code (optional) +int +nfp_rtsym_readq(struct nfp_cpp *cpp, + const struct nfp_rtsym *sym, + uint64_t offset, + uint64_t *value) +{ + return nfp_rtsym_readq_real(cpp, sym, NFP_CPP_ACTION_RW, 0, offset, value); +} + +static int +nfp_rtsym_write_real(struct nfp_cpp *cpp, + const struct nfp_rtsym *sym, + uint8_t action, + uint8_t token, + uint64_t offset, + void *buf, + size_t len) +{ + int err; + uint64_t addr; + uint32_t cpp_id; + size_t length = len; + uint64_t sym_size = nfp_rtsym_size(sym); + + if (offset > sym_size) { + PMD_DRV_LOG(ERR, "rtsym '%s' write out of bounds", sym->name); + return -ENXIO; + } + + if (length > sym_size - offset) + length = sym_size - offset; + + err = nfp_rtsym_to_dest(cpp, sym, action, token, offset, &cpp_id, &addr); + if (err != 0) + return err; + + return nfp_cpp_write(cpp, cpp_id, addr, buf, length); +} + +int +nfp_rtsym_write(struct nfp_cpp *cpp, + const struct nfp_rtsym *sym, + uint64_t offset, + void *buf, + size_t len) +{ + return nfp_rtsym_write_real(cpp, sym, NFP_CPP_ACTION_RW, 0, offset, buf, len); +} + +static int +nfp_rtsym_writel_real(struct nfp_cpp *cpp, + const struct nfp_rtsym *sym, + uint8_t action, + uint8_t token, + uint64_t offset, + uint32_t value) +{ + int err; + uint64_t addr; + uint32_t cpp_id; + + if (offset + 4 > nfp_rtsym_size(sym)) { + PMD_DRV_LOG(ERR, "rtsym '%s' write out of bounds", sym->name); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, offset, &cpp_id, &addr); + if (err != 0) + return err; + + return nfp_cpp_writel(cpp, cpp_id, addr, value); +} + +int +nfp_rtsym_writel(struct nfp_cpp *cpp, + const struct nfp_rtsym *sym, + uint64_t offset, + uint32_t value) +{ + return nfp_rtsym_writel_real(cpp, sym, NFP_CPP_ACTION_RW, 0, offset, value); +} + +static int +nfp_rtsym_writeq_real(struct nfp_cpp *cpp, + const struct nfp_rtsym *sym, + uint8_t action, + uint8_t token, + uint64_t offset, + uint64_t value) +{ + int err; + uint64_t addr; + uint32_t cpp_id; + + if (offset + 8 > nfp_rtsym_size(sym)) { + PMD_DRV_LOG(ERR, "rtsym '%s' write out of bounds", sym->name); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, offset, &cpp_id, &addr); + if (err != 0) + return err; + + return nfp_cpp_writeq(cpp, cpp_id, addr, value); +} + +int +nfp_rtsym_writeq(struct nfp_cpp *cpp, + const struct nfp_rtsym *sym, + uint64_t offset, + uint64_t value) +{ + return nfp_rtsym_writeq_real(cpp, sym, NFP_CPP_ACTION_RW, 0, offset, value); +} + +/** + * Read a simple unsigned scalar value from symbol * * Lookup a symbol, map, read it and return it's value. Value of the symbol * will be interpreted as a simple little-endian unsigned value. Symbol can * be 4 or 8 bytes in size. * - * Return: value read, on error sets the error and returns ~0ULL. + * @param rtbl + * NFP RTSYM table + * @param name + * Symbol name + * @param error + * Pointer to error code (optional) + * + * @return + * Value read, on error sets the error and returns ~0ULL. 
*/ uint64_t -nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error) +nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, + const char *name, + int *error) { - const struct nfp_rtsym *sym; - uint32_t val32; - uint64_t val; int err; + uint64_t val; + uint32_t val32; + const struct nfp_rtsym *sym; sym = nfp_rtsym_lookup(rtbl, name); if (sym == NULL) { @@ -346,34 +586,82 @@ nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error) switch (sym->size) { case 4: - err = nfp_rtsym_readl(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, &val32); + err = nfp_rtsym_readl(rtbl->cpp, sym, 0, &val32); val = val32; break; case 8: - err = nfp_rtsym_readq(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, &val); + err = nfp_rtsym_readq(rtbl->cpp, sym, 0, &val); break; default: - PMD_DRV_LOG(ERR, "rtsym '%s' unsupported size: %" PRId64, - name, sym->size); + PMD_DRV_LOG(ERR, "rtsym '%s' unsupported size: %#lx", + name, sym->size); err = -EINVAL; break; } - if (err) - err = -EIO; exit: - if (error) + if (error != NULL) *error = err; - if (err) + if (err != 0) return ~0ULL; return val; } +/** + * Write an unsigned scalar value to a symbol + * + * Lookup a symbol and write a value to it. Symbol can be 4 or 8 bytes in size. + * If 4 bytes then the lower 32-bits of 'value' are used. Value will be + * written as simple little-endian unsigned value. + * + * @param rtbl + * NFP RTSYM table + * @param name + * Symbol name + * @param value + * Value to write + * + * @return + * 0 on success or error code. + */ +int +nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, + const char *name, + uint64_t value) +{ + int err; + uint64_t sym_size; + const struct nfp_rtsym *sym; + + sym = nfp_rtsym_lookup(rtbl, name); + if (sym == NULL) + return -ENOENT; + + sym_size = nfp_rtsym_size(sym); + switch (sym_size) { + case 4: + err = nfp_rtsym_writel(rtbl->cpp, sym, 0, value); + break; + case 8: + err = nfp_rtsym_writeq(rtbl->cpp, sym, 0, value); + break; + default: + PMD_DRV_LOG(ERR, "rtsym '%s' unsupported size: %#lx", + name, sym_size); + err = -EINVAL; + break; + } + + return err; +} + uint8_t * -nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, - unsigned int min_size, struct nfp_cpp_area **area) +nfp_rtsym_map(struct nfp_rtsym_table *rtbl, + const char *name, + uint32_t min_size, + struct nfp_cpp_area **area) { int ret; uint8_t *mem; @@ -381,10 +669,9 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, uint32_t cpp_id; const struct nfp_rtsym *sym; - PMD_DRV_LOG(DEBUG, "mapping symbol %s", name); sym = nfp_rtsym_lookup(rtbl, name); if (sym == NULL) { - PMD_INIT_LOG(ERR, "symbol lookup fails for %s", name); + PMD_DRV_LOG(ERR, "Symbol lookup fails for %s", name); return NULL; } @@ -397,16 +684,15 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, if (sym->size < min_size) { PMD_DRV_LOG(ERR, "Symbol %s too small (%" PRIu64 " < %u)", name, - sym->size, min_size); + sym->size, min_size); return NULL; } mem = nfp_cpp_map_area(rtbl->cpp, cpp_id, addr, sym->size, area); if (mem == NULL) { - PMD_INIT_LOG(ERR, "Failed to map symbol %s", name); + PMD_DRV_LOG(ERR, "Failed to map symbol %s", name); return NULL; } - PMD_DRV_LOG(DEBUG, "symbol %s with address %p", name, mem); return mem; } diff --git a/drivers/net/nfp/nfpcore/nfp_rtsym.h b/drivers/net/nfp/nfpcore/nfp_rtsym.h index 8b494211bc2..f79637ac501 100644 --- a/drivers/net/nfp/nfpcore/nfp_rtsym.h +++ b/drivers/net/nfp/nfpcore/nfp_rtsym.h @@ -6,56 +6,39 @@ #ifndef __NFP_RTSYM_H__ #define __NFP_RTSYM_H__ -#define 
NFP_RTSYM_TYPE_NONE 0 -#define NFP_RTSYM_TYPE_OBJECT 1 -#define NFP_RTSYM_TYPE_FUNCTION 2 -#define NFP_RTSYM_TYPE_ABS 3 - -#define NFP_RTSYM_TARGET_NONE 0 -#define NFP_RTSYM_TARGET_LMEM -1 -#define NFP_RTSYM_TARGET_EMU_CACHE -7 - -/* - * Structure describing a run-time NFP symbol. - * - * The memory target of the symbol is generally the CPP target number and can be - * used directly by the nfp_cpp API calls. However, in some cases (i.e., for - * local memory or control store) the target is encoded using a negative number. - * - * When the target type can not be used to fully describe the location of a - * symbol the domain field is used to further specify the location (i.e., the - * specific ME or island number). - * - * For ME target resources, 'domain' is an MEID. - * For Island target resources, 'domain' is an island ID, with the one exception - * of "sram" symbols for backward compatibility, which are viewed as global. - */ -struct nfp_rtsym { - const char *name; - uint64_t addr; - uint64_t size; - int type; - int target; - int domain; -}; +#include "nfp_cpp.h" +struct nfp_rtsym; struct nfp_rtsym_table; struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp); -struct nfp_rtsym_table * -__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip); - int nfp_rtsym_count(struct nfp_rtsym_table *rtbl); const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx); -const struct nfp_rtsym * -nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name); +const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, + const char *name); + +int nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + uint64_t offset, void *buf, size_t len); +int nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + uint64_t offset, uint32_t *value); +int nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + uint64_t offset, uint64_t *value); + +int nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + uint64_t offset, void *buf, size_t len); +int nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + uint64_t offset, uint32_t value); +int nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + uint64_t offset, uint64_t value); uint64_t nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, - int *error); -uint8_t * -nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, - unsigned int min_size, struct nfp_cpp_area **area); -#endif + int *error); +int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, + uint64_t value); +uint8_t *nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, + uint32_t min_size, struct nfp_cpp_area **area); + +#endif /* __NFP_RTSYM_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp_target.c b/drivers/net/nfp/nfpcore/nfp_target.c index 611848e2339..ecb45f79282 100644 --- a/drivers/net/nfp/nfpcore/nfp_target.c +++ b/drivers/net/nfp/nfpcore/nfp_target.c @@ -3,6 +3,8 @@ * All rights reserved. 
*/ +#include "nfp_target.h" + #include "nfp_cpp.h" #include "nfp6000/nfp6000.h" @@ -315,8 +317,7 @@ nfp6000_mu(uint32_t cpp_id, } else if (island == 1 || (island >= 4 && island <= 7) || (island >= 12 && island <= 13) || - (island >= 32 && island <= 47) || - (island >= 48 && island <= 51)) { + (island >= 32 && island <= 51)) { pp = nfp6000_mu_ctm(cpp_id); } else { pp = -EINVAL; @@ -510,7 +511,7 @@ nfp_target_pushpull(uint32_t cpp_id, return nfp6000_cap_xpb(cpp_id); case NFP_CPP_TARGET_CLS: return nfp6000_cls(cpp_id); - case 0: + case NFP_CPP_TARGET_INVALID: return target_rw(cpp_id, P32, 4, 4); default: return -EINVAL; @@ -767,7 +768,7 @@ nfp_encode_basic(uint64_t *addr, /* * Make sure we compare against isldN values by clearing the * LSB. This is what the silicon does. - **/ + */ isld[0] &= ~1; isld[1] &= ~1; diff --git a/drivers/net/nfp/nfpcore/nfp_target.h b/drivers/net/nfp/nfpcore/nfp_target.h index 03908a894ff..47527b40404 100644 --- a/drivers/net/nfp/nfpcore/nfp_target.h +++ b/drivers/net/nfp/nfpcore/nfp_target.h @@ -3,574 +3,29 @@ * All rights reserved. */ -#ifndef NFP_TARGET_H -#define NFP_TARGET_H - -#include "nfp_cpp.h" - -#define P32 1 -#define P64 2 - -#define PUSHPULL(_pull, _push) (((_pull) << 4) | ((_push) << 0)) - -#ifndef NFP_ERRNO -#include -#define NFP_ERRNO(x) (errno = (x), -1) -#endif - -static inline int -pushpull_width(int pp) -{ - pp &= 0xf; - - if (pp == 0) - return NFP_ERRNO(EINVAL); - return (2 << pp); -} - -#define PUSH_WIDTH(_pushpull) pushpull_width((_pushpull) >> 0) -#define PULL_WIDTH(_pushpull) pushpull_width((_pushpull) >> 4) - -static inline int -target_rw(uint32_t cpp_id, int pp, int start, int len) -{ - uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id); - - if (island && (island < start || island > (start + len))) - return NFP_ERRNO(EINVAL); - - switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { - case NFP_CPP_ID(0, 0, 0): - return PUSHPULL(0, pp); - case NFP_CPP_ID(0, 1, 0): - return PUSHPULL(pp, 0); - case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): - return PUSHPULL(pp, pp); - default: - return NFP_ERRNO(EINVAL); - } -} - -static inline int -nfp6000_nbi_dma(uint32_t cpp_id) -{ - switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { - case NFP_CPP_ID(0, 0, 0): /* ReadNbiDma */ - return PUSHPULL(0, P64); - case NFP_CPP_ID(0, 1, 0): /* WriteNbiDma */ - return PUSHPULL(P64, 0); - case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): - return PUSHPULL(P64, P64); - default: - return NFP_ERRNO(EINVAL); - } -} - -static inline int -nfp6000_nbi_stats(uint32_t cpp_id) -{ - switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { - case NFP_CPP_ID(0, 0, 0): /* ReadNbiStats */ - return PUSHPULL(0, P64); - case NFP_CPP_ID(0, 1, 0): /* WriteNbiStats */ - return PUSHPULL(P64, 0); - case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): - return PUSHPULL(P64, P64); - default: - return NFP_ERRNO(EINVAL); - } -} - -static inline int -nfp6000_nbi_tm(uint32_t cpp_id) -{ - switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { - case NFP_CPP_ID(0, 0, 0): /* ReadNbiTM */ - return PUSHPULL(0, P64); - case NFP_CPP_ID(0, 1, 0): /* WriteNbiTM */ - return PUSHPULL(P64, 0); - case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): - return PUSHPULL(P64, P64); - default: - return NFP_ERRNO(EINVAL); - } -} - -static inline int -nfp6000_nbi_ppc(uint32_t cpp_id) -{ - switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { - case NFP_CPP_ID(0, 0, 0): /* ReadNbiPreclassifier */ - return PUSHPULL(0, P64); - case NFP_CPP_ID(0, 1, 0): /* WriteNbiPreclassifier */ - return PUSHPULL(P64, 0); - case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): - return PUSHPULL(P64, P64); - default: - return NFP_ERRNO(EINVAL); 
- } -} - -static inline int -nfp6000_nbi(uint32_t cpp_id, uint64_t address) -{ - uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id); - uint64_t rel_addr = address & 0x3fFFFF; - - if (island && (island < 8 || island > 9)) - return NFP_ERRNO(EINVAL); - - if (rel_addr < (1 << 20)) - return nfp6000_nbi_dma(cpp_id); - if (rel_addr < (2 << 20)) - return nfp6000_nbi_stats(cpp_id); - if (rel_addr < (3 << 20)) - return nfp6000_nbi_tm(cpp_id); - return nfp6000_nbi_ppc(cpp_id); -} - -/* - * This structure ONLY includes items that can be done with a read or write of - * 32-bit or 64-bit words. All others are not listed. - */ -static inline int -nfp6000_mu_common(uint32_t cpp_id) -{ - switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { - case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): /* read_be/write_be */ - return PUSHPULL(P64, P64); - case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1): /* read_le/write_le */ - return PUSHPULL(P64, P64); - case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 2): /* {read/write}_swap_be */ - return PUSHPULL(P64, P64); - case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 3): /* {read/write}_swap_le */ - return PUSHPULL(P64, P64); - case NFP_CPP_ID(0, 0, 0): /* read_be */ - return PUSHPULL(0, P64); - case NFP_CPP_ID(0, 0, 1): /* read_le */ - return PUSHPULL(0, P64); - case NFP_CPP_ID(0, 0, 2): /* read_swap_be */ - return PUSHPULL(0, P64); - case NFP_CPP_ID(0, 0, 3): /* read_swap_le */ - return PUSHPULL(0, P64); - case NFP_CPP_ID(0, 1, 0): /* write_be */ - return PUSHPULL(P64, 0); - case NFP_CPP_ID(0, 1, 1): /* write_le */ - return PUSHPULL(P64, 0); - case NFP_CPP_ID(0, 1, 2): /* write_swap_be */ - return PUSHPULL(P64, 0); - case NFP_CPP_ID(0, 1, 3): /* write_swap_le */ - return PUSHPULL(P64, 0); - case NFP_CPP_ID(0, 3, 0): /* atomic_read */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 3, 2): /* mask_compare_write */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 4, 0): /* atomic_write */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 4, 2): /* atomic_write_imm */ - return PUSHPULL(0, 0); - case NFP_CPP_ID(0, 4, 3): /* swap_imm */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 5, 0): /* set */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 5, 3): /* test_set_imm */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 6, 0): /* clr */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 6, 3): /* test_clr_imm */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 7, 0): /* add */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 7, 3): /* test_add_imm */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 8, 0): /* addsat */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 8, 3): /* test_subsat_imm */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 9, 0): /* sub */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 9, 3): /* test_sub_imm */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 10, 0): /* subsat */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 10, 3): /* test_subsat_imm */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 13, 0): /* microq128_get */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 13, 1): /* microq128_pop */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 13, 2): /* microq128_put */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 15, 0): /* xor */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 15, 3): /* test_xor_imm */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 28, 0): /* read32_be */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 28, 1): /* read32_le */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 28, 2): /* read32_swap_be */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 28, 3): /* read32_swap_le */ - 
return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 31, 0): /* write32_be */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 31, 1): /* write32_le */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 31, 2): /* write32_swap_be */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 31, 3): /* write32_swap_le */ - return PUSHPULL(P32, 0); - default: - return NFP_ERRNO(EINVAL); - } -} - -static inline int -nfp6000_mu_ctm(uint32_t cpp_id) -{ - switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { - case NFP_CPP_ID(0, 16, 1): /* packet_read_packet_status */ - return PUSHPULL(0, P32); - default: - return nfp6000_mu_common(cpp_id); - } -} - -static inline int -nfp6000_mu_emu(uint32_t cpp_id) -{ - switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { - case NFP_CPP_ID(0, 18, 0): /* read_queue */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 18, 1): /* read_queue_ring */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 18, 2): /* write_queue */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 18, 3): /* write_queue_ring */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 20, 2): /* journal */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 21, 0): /* get */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 21, 1): /* get_eop */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 21, 2): /* get_freely */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 22, 0): /* pop */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 22, 1): /* pop_eop */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 22, 2): /* pop_freely */ - return PUSHPULL(0, P32); - default: - return nfp6000_mu_common(cpp_id); - } -} - -static inline int -nfp6000_mu_imu(uint32_t cpp_id) -{ - return nfp6000_mu_common(cpp_id); -} - -static inline int -nfp6000_mu(uint32_t cpp_id, uint64_t address) -{ - int pp; - uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id); - - if (island == 0) { - if (address < 0x2000000000ULL) - pp = nfp6000_mu_ctm(cpp_id); - else if (address < 0x8000000000ULL) - pp = nfp6000_mu_emu(cpp_id); - else if (address < 0x9800000000ULL) - pp = nfp6000_mu_ctm(cpp_id); - else if (address < 0x9C00000000ULL) - pp = nfp6000_mu_emu(cpp_id); - else if (address < 0xA000000000ULL) - pp = nfp6000_mu_imu(cpp_id); - else - pp = nfp6000_mu_ctm(cpp_id); - } else if (island >= 24 && island <= 27) { - pp = nfp6000_mu_emu(cpp_id); - } else if (island >= 28 && island <= 31) { - pp = nfp6000_mu_imu(cpp_id); - } else if (island == 1 || - (island >= 4 && island <= 7) || - (island >= 12 && island <= 13) || - (island >= 32 && island <= 47) || - (island >= 48 && island <= 51)) { - pp = nfp6000_mu_ctm(cpp_id); - } else { - pp = NFP_ERRNO(EINVAL); - } - - return pp; -} - -static inline int -nfp6000_ila(uint32_t cpp_id) -{ - uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id); - - if (island && (island < 48 || island > 51)) - return NFP_ERRNO(EINVAL); - - switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { - case NFP_CPP_ID(0, 0, 1): /* read_check_error */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 2, 0): /* read_int */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 3, 0): /* write_int */ - return PUSHPULL(P32, 0); - default: - return target_rw(cpp_id, P32, 48, 4); - } -} - -static inline int -nfp6000_pci(uint32_t cpp_id) -{ - uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id); - - if (island && (island < 4 || island > 7)) - return NFP_ERRNO(EINVAL); - - switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { - case NFP_CPP_ID(0, 2, 0): - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 3, 0): - return PUSHPULL(P32, 0); - default: - return target_rw(cpp_id, P32, 4, 4); - } -} - -static inline int -nfp6000_crypto(uint32_t cpp_id) -{ - 
uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id); - - if (island && (island < 12 || island > 15)) - return NFP_ERRNO(EINVAL); - - switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { - case NFP_CPP_ID(0, 2, 0): - return PUSHPULL(P64, 0); - default: - return target_rw(cpp_id, P64, 12, 4); - } -} - -static inline int -nfp6000_cap_xpb(uint32_t cpp_id) -{ - uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id); - - if (island > 63) - return NFP_ERRNO(EINVAL); - - switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { - case NFP_CPP_ID(0, 0, 1): /* RingGet */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 0, 2): /* Interthread Signal */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 1, 1): /* RingPut */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 1, 2): /* CTNNWr */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 2, 0): /* ReflectRd, signal none */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 2, 1): /* ReflectRd, signal self */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 2, 2): /* ReflectRd, signal remote */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 2, 3): /* ReflectRd, signal both */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 3, 0): /* ReflectWr, signal none */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 3, 1): /* ReflectWr, signal self */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 3, 2): /* ReflectWr, signal remote */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 3, 3): /* ReflectWr, signal both */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1): - return PUSHPULL(P32, P32); - default: - return target_rw(cpp_id, P32, 1, 63); - } -} - -static inline int -nfp6000_cls(uint32_t cpp_id) -{ - uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_id); - - if (island > 63) - return NFP_ERRNO(EINVAL); - - switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { - case NFP_CPP_ID(0, 0, 3): /* xor */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 2, 0): /* set */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 2, 1): /* clr */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 4, 0): /* add */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 4, 1): /* add64 */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 6, 0): /* sub */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 6, 1): /* sub64 */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 6, 2): /* subsat */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 8, 2): /* hash_mask */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 8, 3): /* hash_clear */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 9, 0): /* ring_get */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 9, 1): /* ring_pop */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 9, 2): /* ring_get_freely */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 9, 3): /* ring_pop_freely */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 10, 0): /* ring_put */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 10, 2): /* ring_journal */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 14, 0): /* reflect_write_sig_local */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 15, 1): /* reflect_read_sig_local */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 17, 2): /* statistic */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 24, 0): /* ring_read */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 24, 1): /* ring_write */ - return PUSHPULL(P32, 0); - case NFP_CPP_ID(0, 25, 0): /* ring_workq_add_thread */ - return PUSHPULL(0, P32); - case NFP_CPP_ID(0, 25, 1): /* ring_workq_add_work */ - return PUSHPULL(P32, 0); - default: - return target_rw(cpp_id, P32, 0, 64); - } -} - -static inline int -nfp6000_target_pushpull(uint32_t 
cpp_id, uint64_t address) -{ - switch (NFP_CPP_ID_TARGET_of(cpp_id)) { - case NFP6000_CPPTGT_NBI: - return nfp6000_nbi(cpp_id, address); - case NFP6000_CPPTGT_VQDR: - return target_rw(cpp_id, P32, 24, 4); - case NFP6000_CPPTGT_ILA: - return nfp6000_ila(cpp_id); - case NFP6000_CPPTGT_MU: - return nfp6000_mu(cpp_id, address); - case NFP6000_CPPTGT_PCIE: - return nfp6000_pci(cpp_id); - case NFP6000_CPPTGT_ARM: - if (address < 0x10000) - return target_rw(cpp_id, P64, 1, 1); - else - return target_rw(cpp_id, P32, 1, 1); - case NFP6000_CPPTGT_CRYPTO: - return nfp6000_crypto(cpp_id); - case NFP6000_CPPTGT_CTXPB: - return nfp6000_cap_xpb(cpp_id); - case NFP6000_CPPTGT_CLS: - return nfp6000_cls(cpp_id); - case 0: - return target_rw(cpp_id, P32, 4, 4); - default: - return NFP_ERRNO(EINVAL); - } -} - -static inline int -nfp_target_pushpull_width(int pp, int write_not_read) -{ - if (pp < 0) - return pp; - - if (write_not_read) - return PULL_WIDTH(pp); - else - return PUSH_WIDTH(pp); -} - -static inline int -nfp6000_target_action_width(uint32_t cpp_id, uint64_t address, - int write_not_read) -{ - int pp; - - pp = nfp6000_target_pushpull(cpp_id, address); - - return nfp_target_pushpull_width(pp, write_not_read); -} - -static inline int -nfp_target_action_width(uint32_t model, uint32_t cpp_id, uint64_t address, - int write_not_read) -{ - if (NFP_CPP_MODEL_IS_6000(model)) { - return nfp6000_target_action_width(cpp_id, address, - write_not_read); - } else { - return NFP_ERRNO(EINVAL); - } -} - -static inline int -nfp_target_cpp(uint32_t cpp_island_id, uint64_t cpp_island_address, - uint32_t *cpp_target_id, uint64_t *cpp_target_address, - const uint32_t *imb_table) -{ - int err; - uint8_t island = NFP_CPP_ID_ISLAND_of(cpp_island_id); - uint8_t target = NFP_CPP_ID_TARGET_of(cpp_island_id); - uint32_t imb; - - if (target >= 16) - return NFP_ERRNO(EINVAL); - - if (island == 0) { - /* Already translated */ - *cpp_target_id = cpp_island_id; - *cpp_target_address = cpp_island_address; - return 0; - } - - if (imb_table == NULL) { - /* CPP + Island only allowed on systems with IMB tables */ - return NFP_ERRNO(EINVAL); - } - - imb = imb_table[target]; - - *cpp_target_address = cpp_island_address; - err = _nfp6000_cppat_addr_encode(cpp_target_address, island, target, - ((imb >> 13) & 7), - ((imb >> 12) & 1), - ((imb >> 6) & 0x3f), - ((imb >> 0) & 0x3f)); - if (err == 0) { - *cpp_target_id = - NFP_CPP_ID(target, NFP_CPP_ID_ACTION_of(cpp_island_id), - NFP_CPP_ID_TOKEN_of(cpp_island_id)); - } - - return err; -} - -#endif /* NFP_TARGET_H */ +#ifndef __NFP_TARGET_H__ +#define __NFP_TARGET_H__ + +#include + +/* CPP Target IDs */ +#define NFP_CPP_TARGET_INVALID 0 +#define NFP_CPP_TARGET_NBI 1 +#define NFP_CPP_TARGET_QDR 2 +#define NFP_CPP_TARGET_ILA 6 +#define NFP_CPP_TARGET_MU 7 +#define NFP_CPP_TARGET_PCIE 9 +#define NFP_CPP_TARGET_ARM 10 +#define NFP_CPP_TARGET_CRYPTO 12 +#define NFP_CPP_TARGET_ISLAND_XPB 14 /* Shared with CAP */ +#define NFP_CPP_TARGET_ISLAND_CAP 14 /* Shared with XPB */ +#define NFP_CPP_TARGET_CT_XPB 14 +#define NFP_CPP_TARGET_LOCAL_SCRATCH 15 +#define NFP_CPP_TARGET_CLS NFP_CPP_TARGET_LOCAL_SCRATCH + +int nfp_target_pushpull(uint32_t cpp_id, uint64_t address); +int nfp_target_cpp(uint32_t cpp_island_id, uint64_t cpp_island_address, + uint32_t *cpp_target_id, uint64_t *cpp_target_address, + const uint32_t *imb_table); + +#endif /* __NFP_TARGET_H__ */ diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c index 27243d85c87..22ccdb0b7d3 100644 --- a/drivers/net/ngbe/base/ngbe_hw.c 
+++ b/drivers/net/ngbe/base/ngbe_hw.c @@ -1061,26 +1061,10 @@ s32 ngbe_set_pcie_master(struct ngbe_hw *hw, bool enable) { struct rte_pci_device *pci_dev = (struct rte_pci_device *)hw->back; s32 status = 0; - s32 ret = 0; u32 i; - u16 reg; - ret = rte_pci_read_config(pci_dev, ®, - sizeof(reg), PCI_COMMAND); - if (ret != sizeof(reg)) { - DEBUGOUT("Cannot read command from PCI config space!\n"); - return -1; - } - - if (enable) - reg |= PCI_COMMAND_MASTER; - else - reg &= ~PCI_COMMAND_MASTER; - - ret = rte_pci_write_config(pci_dev, ®, - sizeof(reg), PCI_COMMAND); - if (ret != sizeof(reg)) { - DEBUGOUT("Cannot write command to PCI config space!\n"); + if (rte_pci_set_bus_master(pci_dev, enable) < 0) { + DEBUGOUT("Cannot configure PCI bus master\n"); return -1; } diff --git a/drivers/net/ngbe/base/ngbe_osdep.h b/drivers/net/ngbe/base/ngbe_osdep.h index 8783fce4dd8..30598a240a4 100644 --- a/drivers/net/ngbe/base/ngbe_osdep.h +++ b/drivers/net/ngbe/base/ngbe_osdep.h @@ -181,7 +181,4 @@ static inline u64 REVERT_BIT_MASK64(u64 mask) #define ETH_P_8021Q 0x8100 #define ETH_P_8021AD 0x88A8 -#define PCI_COMMAND 0x04 -#define PCI_COMMAND_MASTER 0x4 - #endif /* _NGBE_OS_H_ */ diff --git a/drivers/net/ngbe/base/ngbe_phy_rtl.c b/drivers/net/ngbe/base/ngbe_phy_rtl.c index b0eb6c97c0f..ba63a8058a9 100644 --- a/drivers/net/ngbe/base/ngbe_phy_rtl.c +++ b/drivers/net/ngbe/base/ngbe_phy_rtl.c @@ -148,6 +148,11 @@ s32 ngbe_init_phy_rtl(struct ngbe_hw *hw) hw->phy.write_reg(hw, 27, 0xa42, 0x8011); hw->phy.write_reg(hw, 28, 0xa42, 0x5737); + /* Disable fall to 100m if signal is not good */ + hw->phy.read_reg(hw, 17, 0xa44, &value); + value &= ~0x8; + hw->phy.write_reg(hw, 17, 0xa44, value); + hw->phy.write_reg(hw, RTL_SCR, 0xa46, RTL_SCR_EXTINI); hw->phy.read_reg(hw, RTL_SCR, 0xa46, &value); if (!(value & RTL_SCR_EXTINI)) { diff --git a/drivers/net/ngbe/base/ngbe_phy_yt.c b/drivers/net/ngbe/base/ngbe_phy_yt.c index 754faadd6a1..ea313cd9a50 100644 --- a/drivers/net/ngbe/base/ngbe_phy_yt.c +++ b/drivers/net/ngbe/base/ngbe_phy_yt.c @@ -205,38 +205,68 @@ s32 ngbe_setup_phy_link_yt(struct ngbe_hw *hw, u32 speed, hw->phy.set_phy_power(hw, true); } else if ((value & YT_CHIP_MODE_MASK) == YT_CHIP_MODE_SEL(1)) { /* fiber to rgmii */ - hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + if (!hw->mac.autoneg) { + switch (speed) { + case NGBE_LINK_SPEED_1GB_FULL: + value = NGBE_LINK_SPEED_1GB_FULL; + break; + case NGBE_LINK_SPEED_100M_FULL: + value = NGBE_LINK_SPEED_100M_FULL; + break; + default: + value = NGBE_LINK_SPEED_1GB_FULL; + break; + } + hw->phy.autoneg_advertised |= value; + goto skip_an_fiber; + } - /* RGMII_Config1 : Config rx and tx training delay */ - value = YT_RGMII_CONF1_RXDELAY | - YT_RGMII_CONF1_TXDELAY_FE | - YT_RGMII_CONF1_TXDELAY; + value = 0; + if (speed & NGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + if (speed & NGBE_LINK_SPEED_100M_FULL) + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100M_FULL; + +skip_an_fiber: rte_spinlock_lock(&hw->phy_lock); - ngbe_write_phy_reg_ext_yt(hw, YT_RGMII_CONF1, 0, value); - value = YT_CHIP_MODE_SEL(1) | - YT_CHIP_SW_LDO_EN | - YT_CHIP_SW_RST; - ngbe_write_phy_reg_ext_yt(hw, YT_CHIP, 0, value); + ngbe_read_phy_reg_ext_yt(hw, YT_MISC, 0, &value); + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL) + value |= YT_MISC_RESV; + else if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100M_FULL) + value &= ~YT_MISC_RESV; + ngbe_write_phy_reg_ext_yt(hw, YT_MISC, 0, value); + /* close auto sensing */ 
ngbe_read_phy_reg_sds_ext_yt(hw, YT_AUTO, 0, &value); value &= ~YT_AUTO_SENSING; ngbe_write_phy_reg_sds_ext_yt(hw, YT_AUTO, 0, value); - ngbe_read_phy_reg_ext_yt(hw, YT_MISC, 0, &value); - value |= YT_MISC_RESV; - ngbe_write_phy_reg_ext_yt(hw, YT_MISC, 0, value); - ngbe_read_phy_reg_ext_yt(hw, YT_CHIP, 0, &value); value &= ~YT_CHIP_SW_RST; ngbe_write_phy_reg_ext_yt(hw, YT_CHIP, 0, value); + /* RGMII_Config1 : Config rx and tx training delay */ + value = YT_RGMII_CONF1_RXDELAY | + YT_RGMII_CONF1_TXDELAY_FE | + YT_RGMII_CONF1_TXDELAY; + + ngbe_write_phy_reg_ext_yt(hw, YT_RGMII_CONF1, 0, value); + value = YT_CHIP_MODE_SEL(1) | + YT_CHIP_SW_LDO_EN | + YT_CHIP_SW_RST; + ngbe_write_phy_reg_ext_yt(hw, YT_CHIP, 0, value); + /* software reset */ - if (hw->mac.autoneg) + if (hw->mac.autoneg) { value = YT_BCR_RESET | YT_BCR_ANE | YT_BCR_RESTART_AN | YT_BCR_DUPLEX | YT_BCR_SPEED_SELECT1; - else - value = YT_BCR_RESET | YT_BCR_DUPLEX | - YT_BCR_SPEED_SELECT1; + } else { + value = YT_BCR_RESET | YT_BCR_DUPLEX; + if (speed & NGBE_LINK_SPEED_1GB_FULL) + value |= YT_BCR_SPEED_SELECT1; + if (speed & NGBE_LINK_SPEED_100M_FULL) + value |= YT_BCR_SPEED_SELECT0; + } hw->phy.write_reg(hw, YT_BCR, 0, value); rte_spinlock_unlock(&hw->phy_lock); diff --git a/drivers/net/ngbe/base/ngbe_type.h b/drivers/net/ngbe/base/ngbe_type.h index 37be288a74c..8a7d2cd3311 100644 --- a/drivers/net/ngbe/base/ngbe_type.h +++ b/drivers/net/ngbe/base/ngbe_type.h @@ -116,6 +116,46 @@ struct ngbe_fc_info { enum ngbe_fc_mode requested_mode; /* FC mode requested by caller */ }; +/* Flow Control Data Sheet defined values + * Calculation and defines taken from 802.1bb Annex O + */ +/* BitTimes (BT) conversion */ +#define NGBE_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024)) +#define NGBE_B2BT(BT) ((BT) * 8) + +/* Calculate Delay to respond to PFC */ +#define NGBE_PFC_D 672 + +/* Calculate Cable Delay */ +#define NGBE_CABLE_DC 5556 /* Delay Copper */ + +/* Calculate Interface Delay */ +#define NGBE_PHY_D 12800 +#define NGBE_MAC_D 4096 +#define NGBE_XAUI_D (2 * 1024) + +#define NGBE_ID (NGBE_MAC_D + NGBE_XAUI_D + NGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define NGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ +#define NGBE_PCI_DELAY 10000 + +/* Calculate delay value in bit times */ +#define NGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (NGBE_B2BT(_max_frame_link) + \ + NGBE_PFC_D + \ + (2 * NGBE_CABLE_DC) + \ + (2 * NGBE_ID) + \ + NGBE_HD) / 25 + 1) + \ + 2 * NGBE_B2BT(_max_frame_tc)) + +#define NGBE_LOW_DV(_max_frame_tc) \ + (2 * ((2 * NGBE_B2BT(_max_frame_tc) + \ + (36 * NGBE_PCI_DELAY / 25) + 1))) + /* Statistics counters collected by the MAC */ /* PB[] RxTx */ struct ngbe_pb_stats { diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c index af77081d9a9..478da014b2f 100644 --- a/drivers/net/ngbe/ngbe_ethdev.c +++ b/drivers/net/ngbe/ngbe_ethdev.c @@ -90,6 +90,7 @@ static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev); static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); static void ngbe_dev_interrupt_handler(void *param); static void ngbe_configure_msix(struct rte_eth_dev *dev); +static void ngbe_pbthresh_set(struct rte_eth_dev *dev); #define NGBE_SET_HWSTRIP(h, q) do {\ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ @@ -1037,6 +1038,7 @@ ngbe_dev_start(struct rte_eth_dev *dev) } hw->mac.setup_pba(hw); + ngbe_pbthresh_set(dev); ngbe_configure_port(dev); err = ngbe_dev_rxtx_start(dev); @@ -1165,7 +1167,7 @@ ngbe_dev_stop(struct 
rte_eth_dev *dev) int vf; if (hw->adapter_stopped) - return 0; + goto out; PMD_INIT_FUNC_TRACE(); @@ -1187,8 +1189,6 @@ ngbe_dev_stop(struct rte_eth_dev *dev) for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) vfinfo[vf].clear_to_send = false; - hw->phy.set_phy_power(hw, false); - ngbe_dev_clear_queues(dev); /* Clear stored conf */ @@ -1215,6 +1215,10 @@ ngbe_dev_stop(struct rte_eth_dev *dev) hw->adapter_stopped = true; dev->data->dev_started = 0; +out: + /* close phy to prevent reset in dev_close from restarting physical link */ + hw->phy.set_phy_power(hw, false); + return 0; } @@ -1258,6 +1262,9 @@ ngbe_dev_close(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + ngbe_pf_reset_hw(hw); ngbe_dev_stop(dev); @@ -1959,6 +1966,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK, NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE); } + wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_PROMISC, + NGBE_MACRXFLT_PROMISC); } return rte_eth_linkstatus_set(dev, &link); @@ -2386,6 +2395,93 @@ ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) return -EIO; } +/* Additional bittime to account for NGBE framing */ +#define NGBE_ETH_FRAMING 20 + +/* + * ngbe_fc_hpbthresh_set - calculate high water mark for flow control + * + * @dv_id: device interface delay + * @pb: packet buffer to calculate + */ +static s32 +ngbe_fc_hpbthresh_set(struct rte_eth_dev *dev) +{ + struct ngbe_hw *hw = ngbe_dev_hw(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + u32 max_frame_size, tc, dv_id, rx_pb; + s32 kb, marker; + + /* Calculate max LAN frame size */ + max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK); + tc = max_frame_size + NGBE_ETH_FRAMING; + + /* Calculate delay value for device */ + dv_id = NGBE_DV(tc, tc); + + /* Loopback switch introduces additional latency */ + if (pci_dev->max_vfs) + dv_id += NGBE_B2BT(tc); + + /* Delay value is calculated in bit times convert to KB */ + kb = NGBE_BT2KB(dv_id); + rx_pb = rd32(hw, NGBE_PBRXSIZE) >> 10; + + marker = rx_pb - kb; + + /* It is possible that the packet buffer is not large enough + * to provide required headroom. In this case throw an error + * to user and do the best we can. 
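	 *
	 * Illustrative numbers (not part of this patch): for a 1518 byte
	 * maximum frame, tc = 1518 + NGBE_ETH_FRAMING = 1538 bytes, so
	 * NGBE_DV(tc, tc) comes to roughly 122700 bit times, which
	 * NGBE_BT2KB() rounds up to 15 KB of required headroom, while
	 * NGBE_LOW_DV(tc) works out to about 10 KB for the low water mark.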
+ */ + if (marker < 0) { + PMD_DRV_LOG(WARNING, "Packet Buffer can not provide enough headroom to support flow control."); + marker = tc + 1; + } + + return marker; +} + +/* + * ngbe_fc_lpbthresh_set - calculate low water mark for flow control + * + * @dv_id: device interface delay + */ +static s32 +ngbe_fc_lpbthresh_set(struct rte_eth_dev *dev) +{ + struct ngbe_hw *hw = ngbe_dev_hw(dev); + u32 max_frame_size, tc, dv_id; + s32 kb; + + /* Calculate max LAN frame size */ + max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK); + tc = max_frame_size + NGBE_ETH_FRAMING; + + /* Calculate delay value for device */ + dv_id = NGBE_LOW_DV(tc); + + /* Delay value is calculated in bit times convert to KB */ + kb = NGBE_BT2KB(dv_id); + + return kb; +} + +/* + * ngbe_pbthresh_setup - calculate and setup high low water marks + */ +static void +ngbe_pbthresh_set(struct rte_eth_dev *dev) +{ + struct ngbe_hw *hw = ngbe_dev_hw(dev); + + hw->fc.high_water = ngbe_fc_hpbthresh_set(dev); + hw->fc.low_water = ngbe_fc_lpbthresh_set(dev); + + /* Low water marks must not be larger than high water marks */ + if (hw->fc.low_water > hw->fc.high_water) + hw->fc.low_water = 0; +} + int ngbe_dev_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c index f31906cc2fe..ec353a30b1d 100644 --- a/drivers/net/ngbe/ngbe_rxtx.c +++ b/drivers/net/ngbe/ngbe_rxtx.c @@ -2415,6 +2415,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) if (txq != NULL) { txq->ops->release_mbufs(txq); txq->ops->reset(txq); + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } } @@ -2424,6 +2425,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) if (rxq != NULL) { ngbe_rx_queue_release_mbufs(rxq); ngbe_reset_rx_queue(adapter, rxq); + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } } } diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c index 31081af7975..d742bc415c8 100644 --- a/drivers/net/null/rte_eth_null.c +++ b/drivers/net/null/rte_eth_null.c @@ -192,21 +192,36 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused) static int eth_dev_start(struct rte_eth_dev *dev) { + uint16_t i; + if (dev == NULL) return -EINVAL; dev->data->dev_link.link_status = RTE_ETH_LINK_UP; + + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; } static int eth_dev_stop(struct rte_eth_dev *dev) { + uint16_t i; + if (dev == NULL) return 0; dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c index 57b965ad067..970372bbd79 100644 --- a/drivers/net/octeon_ep/otx_ep_ethdev.c +++ b/drivers/net/octeon_ep/otx_ep_ethdev.c @@ -156,6 +156,11 @@ otx_ep_dev_start(struct rte_eth_dev *eth_dev) otx_ep_dev_link_update(eth_dev, 0); otx_ep_info("dev started\n"); + for (q = 0; q < eth_dev->data->nb_rx_queues; q++) + eth_dev->data->rx_queue_state[q] = RTE_ETH_QUEUE_STATE_STARTED; + for (q = 0; q < eth_dev->data->nb_tx_queues; q++) + eth_dev->data->tx_queue_state[q] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; } @@ -164,9 +169,15 @@ static 
int otx_ep_dev_stop(struct rte_eth_dev *eth_dev) { struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev); + uint16_t i; otx_epvf->fn_list.disable_io_queues(otx_epvf); + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) + eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c index a6ae51a42c4..2a8378a33ea 100644 --- a/drivers/net/octeontx/octeontx_ethdev.c +++ b/drivers/net/octeontx/octeontx_ethdev.c @@ -732,6 +732,11 @@ octeontx_dev_start(struct rte_eth_dev *dev) } /* Success */ + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return ret; pki_port_stop_error: @@ -746,6 +751,7 @@ static int octeontx_dev_stop(struct rte_eth_dev *dev) { struct octeontx_nic *nic = octeontx_pmd_priv(dev); + uint16_t i; int ret; PMD_INIT_FUNC_TRACE(); @@ -772,6 +778,11 @@ octeontx_dev_stop(struct rte_eth_dev *dev) return ret; } + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c index 0352a579509..551f3cf193e 100644 --- a/drivers/net/pfe/pfe_ethdev.c +++ b/drivers/net/pfe/pfe_ethdev.c @@ -241,6 +241,7 @@ pfe_eth_open(struct rte_eth_dev *dev) struct pfe_eth_priv_s *priv = dev->data->dev_private; struct hif_client_s *client; struct hif_shm *hif_shm; + uint16_t i; int rc; /* Register client driver with HIF */ @@ -318,6 +319,10 @@ pfe_eth_open(struct rte_eth_dev *dev) PFE_PMD_INFO("PFE INTERRUPT Mode enabled"); } + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; err0: return rc; @@ -361,6 +366,7 @@ static int pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/) { struct pfe_eth_priv_s *priv = dev->data->dev_private; + uint16_t i; dev->data->dev_started = 0; @@ -370,6 +376,11 @@ pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/) dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h index 97e261d3066..11019b5623f 100644 --- a/drivers/net/qede/base/bcm_osal.h +++ b/drivers/net/qede/base/bcm_osal.h @@ -8,6 +8,7 @@ #define __BCM_OSAL_H #include +#include #include #include #include diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c index c43dccea7ff..48953dd7a05 100644 --- a/drivers/net/ring/rte_eth_ring.c +++ b/drivers/net/ring/rte_eth_ring.c @@ -113,15 +113,30 @@ eth_dev_start(struct rte_eth_dev *dev) static int eth_dev_stop(struct rte_eth_dev *dev) { + uint16_t i; + dev->data->dev_started = 0; dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; + + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = 
RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; return 0; } static int eth_dev_set_link_down(struct rte_eth_dev *dev) { + uint16_t i; + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; + + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/sfc/sfc_ef100_tx.c b/drivers/net/sfc/sfc_ef100_tx.c index 4c2205f7a4a..1b6374775f0 100644 --- a/drivers/net/sfc/sfc_ef100_tx.c +++ b/drivers/net/sfc/sfc_ef100_tx.c @@ -405,7 +405,7 @@ sfc_ef100_tx_qdesc_send_create(const struct sfc_ef100_txq *txq, m->l2_len + m->l3_len) >> 1; } - rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova_default(m), + rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova(m), rte_pktmbuf_data_len(m), &dma_addr); if (unlikely(rc != 0)) return rc; diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c index a35f20770dc..1b50aefe5c4 100644 --- a/drivers/net/sfc/sfc_flow.c +++ b/drivers/net/sfc/sfc_flow.c @@ -2864,6 +2864,40 @@ sfc_flow_action_handle_destroy(struct rte_eth_dev *dev, return rc; } +static int +sfc_flow_action_handle_update(struct rte_eth_dev *dev, + struct rte_flow_action_handle *handle, + const void *update, struct rte_flow_error *error) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct rte_flow_action_handle *entry; + int rc = EINVAL; + + sfc_adapter_lock(sa); + + TAILQ_FOREACH(entry, &sa->flow_indir_actions, entries) { + if (entry != handle) + continue; + + if (entry->transfer) { + rc = sfc_mae_indir_action_update(sa, handle, + update, error); + } else { + SFC_ASSERT(B_FALSE); + } + + goto exit; + } + + rc = rte_flow_error_set(error, ENOENT, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "indirect action handle not found"); + +exit: + sfc_adapter_unlock(sa); + return rc; +} + static int sfc_flow_action_handle_query(struct rte_eth_dev *dev, const struct rte_flow_action_handle *handle, @@ -2907,6 +2941,7 @@ const struct rte_flow_ops sfc_flow_ops = { .isolate = sfc_flow_isolate, .action_handle_create = sfc_flow_action_handle_create, .action_handle_destroy = sfc_flow_action_handle_destroy, + .action_handle_update = sfc_flow_action_handle_update, .action_handle_query = sfc_flow_action_handle_query, .tunnel_decap_set = sfc_ft_decap_set, .tunnel_match = sfc_ft_match, diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h index 06738fe7249..7434554640c 100644 --- a/drivers/net/sfc/sfc_flow.h +++ b/drivers/net/sfc/sfc_flow.h @@ -98,6 +98,7 @@ struct rte_flow_action_handle { enum rte_flow_action_type type; union { + struct sfc_mae_encap_header *encap_header; struct sfc_mae_counter *counter; }; }; diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c index f5fe55b46f0..e5ec0ae49d4 100644 --- a/drivers/net/sfc/sfc_mae.c +++ b/drivers/net/sfc/sfc_mae.c @@ -215,8 +215,37 @@ sfc_mae_attach(struct sfc_adapter *sa) bounce_eh->buf_size = limits.eml_encap_header_size_limit; bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh", bounce_eh->buf_size, 0); - if (bounce_eh->buf == NULL) + if (bounce_eh->buf == NULL) { + rc = ENOMEM; goto fail_mae_alloc_bounce_eh; + } + + sfc_log_init(sa, "allocate bounce action set pointer array"); + mae->bounce_aset_ptrs = rte_calloc("sfc_mae_bounce_aset_ptrs", + EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES, + sizeof(*mae->bounce_aset_ptrs), 0); + if (mae->bounce_aset_ptrs == NULL)
{ + rc = ENOMEM; + goto fail_mae_alloc_bounce_aset_ptrs; + } + + sfc_log_init(sa, "allocate bounce action set contexts"); + mae->bounce_aset_ctxs = rte_calloc("sfc_mae_bounce_aset_ctxs", + EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES, + sizeof(*mae->bounce_aset_ctxs), 0); + if (mae->bounce_aset_ctxs == NULL) { + rc = ENOMEM; + goto fail_mae_alloc_bounce_aset_ctxs; + } + + sfc_log_init(sa, "allocate bounce action set ID array"); + mae->bounce_aset_ids = rte_calloc("sfc_mae_bounce_aset_ids", + EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES, + sizeof(*mae->bounce_aset_ids), 0); + if (mae->bounce_aset_ids == NULL) { + rc = ENOMEM; + goto fail_mae_alloc_bounce_aset_ids; + } mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios; mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios; @@ -228,6 +257,7 @@ sfc_mae_attach(struct sfc_adapter *sa) TAILQ_INIT(&mae->encap_headers); TAILQ_INIT(&mae->counters); TAILQ_INIT(&mae->action_sets); + TAILQ_INIT(&mae->action_set_lists); TAILQ_INIT(&mae->action_rules); if (encp->enc_mae_admin) @@ -239,6 +269,15 @@ sfc_mae_attach(struct sfc_adapter *sa) return 0; +fail_mae_alloc_bounce_aset_ids: + rte_free(mae->bounce_aset_ctxs); + +fail_mae_alloc_bounce_aset_ctxs: + rte_free(mae->bounce_aset_ptrs); + +fail_mae_alloc_bounce_aset_ptrs: + rte_free(mae->bounce_eh.buf); + fail_mae_alloc_bounce_eh: fail_mae_assign_switch_port: fail_mae_assign_switch_domain: @@ -272,6 +311,9 @@ sfc_mae_detach(struct sfc_adapter *sa) if (status_prev != SFC_MAE_STATUS_ADMIN) return; + rte_free(mae->bounce_aset_ids); + rte_free(mae->bounce_aset_ctxs); + rte_free(mae->bounce_aset_ptrs); rte_free(mae->bounce_eh.buf); sfc_mae_counter_registry_fini(&mae->counter_registry); @@ -663,6 +705,9 @@ sfc_mae_encap_header_attach(struct sfc_adapter *sa, SFC_ASSERT(sfc_adapter_is_locked(sa)); TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) { + if (encap_header->indirect) + continue; + if (encap_header->size == bounce_eh->size && memcmp(encap_header->buf, bounce_eh->buf, bounce_eh->size) == 0) { @@ -746,6 +791,50 @@ sfc_mae_encap_header_del(struct sfc_adapter *sa, sfc_dbg(sa, "deleted encap_header=%p", encap_header); } +static int +sfc_mae_encap_header_update(struct sfc_adapter *sa, + struct sfc_mae_encap_header *encap_header) +{ + const struct sfc_mae_bounce_eh *bounce_eh = &sa->mae.bounce_eh; + struct sfc_mae_fw_rsrc *fw_rsrc; + uint8_t *buf; + int ret; + + if (bounce_eh->type != encap_header->type || + bounce_eh->size == 0) + return EINVAL; + + buf = rte_malloc("sfc_mae_encap_header_buf", bounce_eh->size, 0); + if (buf == NULL) + return ENOMEM; + + rte_memcpy(buf, bounce_eh->buf, bounce_eh->size); + + fw_rsrc = &encap_header->fw_rsrc; + + if (fw_rsrc->refcnt > 0) { + SFC_ASSERT(fw_rsrc->eh_id.id != EFX_MAE_RSRC_ID_INVALID); + + ret = efx_mae_encap_header_update(sa->nic, &fw_rsrc->eh_id, + encap_header->type, buf, + bounce_eh->size); + if (ret != 0) { + sfc_err(sa, "failed to update encap_header=%p: %s", + encap_header, strerror(ret)); + rte_free(buf); + return ret; + } + } + + encap_header->size = bounce_eh->size; + rte_free(encap_header->buf); + encap_header->buf = buf; + + sfc_dbg(sa, "updated encap_header=%p", encap_header); + + return 0; +} + static int sfc_mae_encap_header_enable(struct sfc_adapter *sa, struct sfc_mae_encap_header *encap_header, @@ -987,15 +1076,6 @@ sfc_mae_counter_disable(struct sfc_adapter *sa, struct sfc_mae_counter *counter) --(fw_rsrc->refcnt); } -struct sfc_mae_aset_ctx { - struct sfc_mae_encap_header *encap_header; - struct sfc_mae_counter *counter; - struct 
sfc_mae_mac_addr *dst_mac; - struct sfc_mae_mac_addr *src_mac; - - efx_mae_actions_t *spec; -}; - static struct sfc_mae_action_set * sfc_mae_action_set_attach(struct sfc_adapter *sa, const struct sfc_mae_aset_ctx *ctx) @@ -1223,9 +1303,222 @@ sfc_mae_action_set_disable(struct sfc_adapter *sa, --(fw_rsrc->refcnt); } +static struct sfc_mae_action_set_list * +sfc_mae_action_set_list_attach(struct sfc_adapter *sa) +{ + struct sfc_mae_action_set_list *action_set_list; + struct sfc_mae *mae = &sa->mae; + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + TAILQ_FOREACH(action_set_list, &mae->action_set_lists, entries) { + if (action_set_list->nb_action_sets != mae->nb_bounce_asets) + continue; + + if (memcmp(action_set_list->action_sets, mae->bounce_aset_ptrs, + sizeof(struct sfc_mae_action_set *) * + mae->nb_bounce_asets) == 0) { + sfc_dbg(sa, "attaching to action_set_list=%p", + action_set_list); + ++(action_set_list->refcnt); + return action_set_list; + } + } + + return NULL; +} + +static int +sfc_mae_action_set_list_add(struct sfc_adapter *sa, + struct sfc_mae_action_set_list **action_set_listp) +{ + struct sfc_mae_action_set_list *action_set_list; + struct sfc_mae *mae = &sa->mae; + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + action_set_list = rte_zmalloc("sfc_mae_action_set_list", + sizeof(*action_set_list), 0); + if (action_set_list == NULL) { + sfc_err(sa, "failed to allocate action set list"); + return ENOMEM; + } + + action_set_list->refcnt = 1; + action_set_list->nb_action_sets = mae->nb_bounce_asets; + action_set_list->fw_rsrc.aset_list_id.id = EFX_MAE_RSRC_ID_INVALID; + + action_set_list->action_sets = + rte_calloc("sfc_mae_action_set_list_action_sets", + sizeof(struct sfc_mae_action_set *), + action_set_list->nb_action_sets, 0); + if (action_set_list->action_sets == NULL) { + sfc_err(sa, "failed to allocate action set list"); + rte_free(action_set_list); + return ENOMEM; + } + + rte_memcpy(action_set_list->action_sets, mae->bounce_aset_ptrs, + sizeof(struct sfc_mae_action_set *) * + action_set_list->nb_action_sets); + + TAILQ_INSERT_TAIL(&mae->action_set_lists, action_set_list, entries); + + *action_set_listp = action_set_list; + + sfc_dbg(sa, "added action_set_list=%p", action_set_list); + + return 0; +} + +static void +sfc_mae_action_set_list_del(struct sfc_adapter *sa, + struct sfc_mae_action_set_list *action_set_list) +{ + struct sfc_mae *mae = &sa->mae; + unsigned int i; + + if (action_set_list == NULL) + return; + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + SFC_ASSERT(action_set_list->refcnt != 0); + + --(action_set_list->refcnt); + + if (action_set_list->refcnt != 0) + return; + + if (action_set_list->fw_rsrc.aset_list_id.id != + EFX_MAE_RSRC_ID_INVALID || action_set_list->fw_rsrc.refcnt != 0) { + sfc_err(sa, "deleting action_set_list=%p abandons its FW resource: ASL_ID=0x%08x, refcnt=%u", + action_set_list, + action_set_list->fw_rsrc.aset_list_id.id, + action_set_list->fw_rsrc.refcnt); + } + + for (i = 0; i < action_set_list->nb_action_sets; ++i) + sfc_mae_action_set_del(sa, action_set_list->action_sets[i]); + + TAILQ_REMOVE(&mae->action_set_lists, action_set_list, entries); + rte_free(action_set_list->action_sets); + rte_free(action_set_list); + + sfc_dbg(sa, "deleted action_set_list=%p", action_set_list); +} + +static int +sfc_mae_action_set_list_enable(struct sfc_adapter *sa, + struct sfc_mae_action_set_list *action_set_list) +{ + struct sfc_mae_fw_rsrc *fw_rsrc; + unsigned int i; + unsigned int j; + int rc; + + if (action_set_list == NULL) + return 0; + + 
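/*
 * Illustrative, self-contained model (not part of this patch) of the
 * attach-or-add registry pattern used by sfc_mae_action_set_list_attach(),
 * _add() and _del() above: reuse an identical cached entry by bumping its
 * reference count, otherwise allocate and insert a new one, and only free
 * on the last release.  All names below (entry, registry_get, ...) are
 * placeholders, not driver symbols.
 */
#include <stdlib.h>
#include <sys/queue.h>

struct entry {
	TAILQ_ENTRY(entry) link;
	unsigned int refcnt;
	int key;
};

TAILQ_HEAD(registry, entry);

static struct entry *
registry_get(struct registry *r, int key)
{
	struct entry *e;

	TAILQ_FOREACH(e, r, link) {
		if (e->key == key) {
			++e->refcnt;	/* attach to the existing entry */
			return e;
		}
	}

	e = calloc(1, sizeof(*e));	/* no match: add a new entry */
	if (e == NULL)
		return NULL;
	e->key = key;
	e->refcnt = 1;
	TAILQ_INSERT_TAIL(r, e, link);
	return e;
}

static void
registry_put(struct registry *r, struct entry *e)
{
	if (e == NULL || --e->refcnt != 0)
		return;
	TAILQ_REMOVE(r, e, link);	/* last reference: delete */
	free(e);
}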
SFC_ASSERT(sfc_adapter_is_locked(sa)); + + fw_rsrc = &action_set_list->fw_rsrc; + + if (fw_rsrc->refcnt == 0) { + struct sfc_mae *mae = &sa->mae; + + SFC_ASSERT(fw_rsrc->aset_list_id.id == EFX_MAE_RSRC_ID_INVALID); + + for (i = 0; i < action_set_list->nb_action_sets; ++i) { + const struct sfc_mae_fw_rsrc *as_fw_rsrc; + + rc = sfc_mae_action_set_enable(sa, + action_set_list->action_sets[i]); + if (rc != 0) + goto fail_action_set_enable; + + as_fw_rsrc = &action_set_list->action_sets[i]->fw_rsrc; + mae->bounce_aset_ids[i].id = as_fw_rsrc->aset_id.id; + } + + rc = efx_mae_action_set_list_alloc(sa->nic, + action_set_list->nb_action_sets, + mae->bounce_aset_ids, + &fw_rsrc->aset_list_id); + if (rc != 0) { + sfc_err(sa, "failed to enable action_set_list=%p: %s", + action_set_list, strerror(rc)); + goto fail_action_set_list_alloc; + } + + sfc_dbg(sa, "enabled action_set_list=%p: ASL_ID=0x%08x", + action_set_list, fw_rsrc->aset_list_id.id); + } + + ++(fw_rsrc->refcnt); + + return 0; + +fail_action_set_list_alloc: +fail_action_set_enable: + for (j = 0; j < i; ++j) + sfc_mae_action_set_disable(sa, action_set_list->action_sets[j]); + + return rc; +} + +static void +sfc_mae_action_set_list_disable(struct sfc_adapter *sa, + struct sfc_mae_action_set_list *action_set_list) +{ + struct sfc_mae_fw_rsrc *fw_rsrc; + int rc; + + if (action_set_list == NULL) + return; + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + fw_rsrc = &action_set_list->fw_rsrc; + + if (fw_rsrc->aset_list_id.id == EFX_MAE_RSRC_ID_INVALID || + fw_rsrc->refcnt == 0) { + sfc_err(sa, "failed to disable action_set_list=%p: already disabled; ASL_ID=0x%08x, refcnt=%u", + action_set_list, fw_rsrc->aset_list_id.id, + fw_rsrc->refcnt); + return; + } + + if (fw_rsrc->refcnt == 1) { + unsigned int i; + + rc = efx_mae_action_set_list_free(sa->nic, + &fw_rsrc->aset_list_id); + if (rc == 0) { + sfc_dbg(sa, "disabled action_set_list=%p with ASL_ID=0x%08x", + action_set_list, fw_rsrc->aset_list_id.id); + } else { + sfc_err(sa, "failed to disable action_set_list=%p with ASL_ID=0x%08x: %s", + action_set_list, fw_rsrc->aset_list_id.id, + strerror(rc)); + } + fw_rsrc->aset_list_id.id = EFX_MAE_RSRC_ID_INVALID; + + for (i = 0; i < action_set_list->nb_action_sets; ++i) { + sfc_mae_action_set_disable(sa, + action_set_list->action_sets[i]); + } + } + + --(fw_rsrc->refcnt); +} + struct sfc_mae_action_rule_ctx { struct sfc_mae_outer_rule *outer_rule; + /* + * When action_set_list != NULL, action_set is NULL, and vice versa. 
+ */ struct sfc_mae_action_set *action_set; + struct sfc_mae_action_set_list *action_set_list; efx_mae_match_spec_t *match_spec; uint32_t ct_mark; }; @@ -1256,6 +1549,7 @@ sfc_mae_action_rule_attach(struct sfc_adapter *sa, if (rule->outer_rule != ctx->outer_rule || rule->action_set != ctx->action_set || + rule->action_set_list != ctx->action_set_list || !!rule->ct_mark != !!ctx->ct_mark) continue; @@ -1331,6 +1625,7 @@ sfc_mae_action_rule_add(struct sfc_adapter *sa, rule->outer_rule = ctx->outer_rule; rule->action_set = ctx->action_set; + rule->action_set_list = ctx->action_set_list; rule->match_spec = ctx->match_spec; rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID; @@ -1367,6 +1662,7 @@ sfc_mae_action_rule_del(struct sfc_adapter *sa, } efx_mae_match_spec_fini(sa->nic, rule->match_spec); + sfc_mae_action_set_list_del(sa, rule->action_set_list); sfc_mae_action_set_del(sa, rule->action_set); sfc_mae_outer_rule_del(sa, rule->outer_rule); @@ -1380,6 +1676,8 @@ static int sfc_mae_action_rule_enable(struct sfc_adapter *sa, struct sfc_mae_action_rule *rule) { + const efx_mae_aset_list_id_t *asl_idp = NULL; + const efx_mae_aset_id_t *as_idp = NULL; struct sfc_mae_fw_rsrc *fw_rsrc; int rc; @@ -1398,9 +1696,18 @@ sfc_mae_action_rule_enable(struct sfc_adapter *sa, if (rc != 0) goto fail_action_set_enable; - rc = efx_mae_action_rule_insert(sa->nic, rule->match_spec, NULL, - &rule->action_set->fw_rsrc.aset_id, - &fw_rsrc->rule_id); + rc = sfc_mae_action_set_list_enable(sa, rule->action_set_list); + if (rc != 0) + goto fail_action_set_list_enable; + + if (rule->action_set_list != NULL) + asl_idp = &rule->action_set_list->fw_rsrc.aset_list_id; + + if (rule->action_set != NULL) + as_idp = &rule->action_set->fw_rsrc.aset_id; + + rc = efx_mae_action_rule_insert(sa->nic, rule->match_spec, asl_idp, + as_idp, &fw_rsrc->rule_id); if (rc != 0) { sfc_err(sa, "failed to enable action_rule=%p: %s", rule, strerror(rc)); @@ -1418,6 +1725,9 @@ sfc_mae_action_rule_enable(struct sfc_adapter *sa, return 0; fail_action_rule_insert: + sfc_mae_action_set_list_disable(sa, rule->action_set_list); + +fail_action_set_list_enable: sfc_mae_action_set_disable(sa, rule->action_set); fail_action_set_enable: @@ -1456,6 +1766,8 @@ sfc_mae_action_rule_disable(struct sfc_adapter *sa, fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID; + sfc_mae_action_set_list_disable(sa, rule->action_set_list); + sfc_mae_action_set_disable(sa, rule->action_set); sfc_mae_outer_rule_disable(sa, rule->outer_rule, @@ -4057,6 +4369,9 @@ sfc_mae_rule_parse_action_vxlan_encap( /* Take care of the masks. 
*/ sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items); + if (spec == NULL) + return 0; + rc = efx_mae_action_set_populate_encap(spec); if (rc != 0) { rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION, @@ -4146,7 +4461,7 @@ sfc_mae_rule_parse_action_count(struct sfc_adapter *sa, } static int -sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa, +sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa, bool replayable_only, const struct rte_flow_action_handle *handle, enum sfc_ft_rule_type ft_rule_type, struct sfc_mae_aset_ctx *ctx, @@ -4157,22 +4472,59 @@ sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa, TAILQ_FOREACH(entry, &sa->flow_indir_actions, entries) { if (entry == handle) { + bool replayable = false; + sfc_dbg(sa, "attaching to indirect_action=%p", entry); switch (entry->type) { case RTE_FLOW_ACTION_TYPE_COUNT: - if (ft_rule_type != SFC_FT_RULE_NONE) { + replayable = true; + break; + default: + break; + } + + if (replayable_only && !replayable) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "the indirect action handle cannot be used"); + } + + switch (entry->type) { + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + if (ctx->encap_header != NULL) { return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot use indirect count action in tunnel model"); + "cannot have multiple actions VXLAN_ENCAP in one flow"); + } + + rc = efx_mae_action_set_populate_encap(ctx->spec); + if (rc != 0) { + return rte_flow_error_set(error, rc, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "failed to add ENCAP to MAE action set"); + } + + ctx->encap_header = entry->encap_header; + ++(ctx->encap_header->refcnt); + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + if (!replayable_only && ctx->counter != NULL) { + /* + * Signal the caller to "replay" the action + * set context and re-invoke this function. 
+ */ + return EEXIST; } - if (ctx->counter != NULL) { + if (ft_rule_type != SFC_FT_RULE_NONE) { return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot have multiple actions COUNT in one flow"); + "cannot use indirect count action in tunnel model"); } + SFC_ASSERT(ctx->counter == NULL); + rc = efx_mae_action_set_populate_count(ctx->spec); if (rc != 0) { return rte_flow_error_set(error, rc, @@ -4347,39 +4699,263 @@ static const char * const action_names[] = { [RTE_FLOW_ACTION_TYPE_JUMP] = "JUMP", }; +static void sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh); + +static int sfc_mae_process_encap_header(struct sfc_adapter *sa, + const struct sfc_mae_bounce_eh *bounce_eh, + struct sfc_mae_encap_header **encap_headerp); + static int -sfc_mae_rule_parse_action(struct sfc_adapter *sa, - const struct rte_flow_action *action, - struct rte_flow *flow, bool ct, - struct sfc_mae_actions_bundle *bundle, - struct sfc_mae_aset_ctx *ctx, - struct rte_flow_error *error) +sfc_mae_aset_ctx_replay(struct sfc_adapter *sa, struct sfc_mae_aset_ctx **ctxp) { - struct sfc_flow_spec_mae *spec_mae = &flow->spec.mae; - const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule; - efx_counter_type_t mae_counter_type = EFX_COUNTER_TYPE_ACTION; - const uint64_t rx_metadata = sa->negotiated_rx_metadata; - struct sfc_mae_counter **counterp = &ctx->counter; - efx_mae_actions_t *spec = ctx->spec; - efx_mae_actions_t *spec_ptr = spec; - unsigned int switch_port_type_mask; - bool custom_error = B_FALSE; - int rc = 0; + const struct sfc_mae_aset_ctx *ctx_cur; + struct sfc_mae_aset_ctx *ctx_new; + struct sfc_mae *mae = &sa->mae; + int rc; - if (ct) { - mae_counter_type = EFX_COUNTER_TYPE_CONNTRACK; - counterp = &spec_mae->ct_counter; - spec_ptr = NULL; + RTE_BUILD_BUG_ON(EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES == 0); + + /* Check the number of complete action set contexts. */ + if (mae->nb_bounce_asets >= (EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES - 1)) + return ENOSPC; + + ctx_cur = &mae->bounce_aset_ctxs[mae->nb_bounce_asets]; + + ++(mae->nb_bounce_asets); + + ctx_new = &mae->bounce_aset_ctxs[mae->nb_bounce_asets]; + + *ctx_new = *ctx_cur; + ctx_new->counter = NULL; + ctx_new->fate_set = false; + + /* + * This clones the action set specification and drops + * actions COUNT and DELIVER from the clone so that + * such can be added to it by later action parsing. 
+ */ + rc = efx_mae_action_set_replay(sa->nic, ctx_cur->spec, &ctx_new->spec); + if (rc != 0) + return rc; + + *ctxp = ctx_new; + + return 0; +} + +static int +sfc_mae_rule_parse_action_rc(struct sfc_adapter *sa, + struct sfc_mae_actions_bundle *bundle, + const struct rte_flow_action *action, + struct rte_flow_error *error, + int rc, bool custom_error) +{ + if (rc == 0) { + bundle->actions_mask |= (1ULL << action->type); + } else if (!custom_error) { + if (action->type < RTE_DIM(action_names)) { + const char *action_name = action_names[action->type]; + + if (action_name != NULL) { + sfc_err(sa, "action %s was rejected: %s", + action_name, strerror(rc)); + } + } + rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "Failed to request the action"); } + return rc; +} + +static int +sfc_mae_rule_parse_action_replayable(struct sfc_adapter *sa, + const struct rte_flow *flow, + struct sfc_mae_actions_bundle *bundle, + const struct rte_flow_action *action, + struct sfc_mae_aset_ctx *ctx, + struct rte_flow_error *error) +{ + const struct sfc_flow_spec_mae *spec_mae = &flow->spec.mae; + efx_mae_actions_t *spec = ctx->spec; + unsigned int switch_port_type_mask; + bool custom_error = false; + bool new_fate_set = false; + bool need_replay = false; + int rc; + + /* + * Decide whether the current action set context is + * complete. If yes, "replay" it = go to a new one. + */ switch (action->type) { - case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: - SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP, - bundle->actions_mask); - if (outer_rule == NULL || - outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN) - rc = EINVAL; - else + case RTE_FLOW_ACTION_TYPE_INDIRECT: + if (ctx->fate_set || ctx->counter != NULL) + need_replay = true; + break; + case RTE_FLOW_ACTION_TYPE_PF: + case RTE_FLOW_ACTION_TYPE_VF: + case RTE_FLOW_ACTION_TYPE_PORT_ID: + case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: + case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: + /* FALLTHROUGH */ + case RTE_FLOW_ACTION_TYPE_DROP: + if (ctx->fate_set) + need_replay = true; + + new_fate_set = true; + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Unsupported action"); + } + + if (need_replay) { + if (spec_mae->ft_rule_type != SFC_FT_RULE_NONE) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "no support for packet replay in tunnel offload"); + } + + if (!ctx->fate_set) { + /* + * With regard to replayable actions, the current action + * set is only needed to hold one of the counters. + * That is, it does not have a fate action, so + * add one to suppress undesired delivery. 
+ */ + rc = efx_mae_action_set_populate_drop(spec); + if (rc != 0) { + return rte_flow_error_set(error, rc, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "failed to auto-add action DROP"); + } + } + + rc = sfc_mae_aset_ctx_replay(sa, &ctx); + if (rc != 0) { + return rte_flow_error_set(error, rc, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "failed to replay the action set"); + } + + spec = ctx->spec; + } + + ctx->fate_set = new_fate_set; + + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_INDIRECT: + rc = sfc_mae_rule_parse_action_indirect(sa, true, action->conf, + spec_mae->ft_rule_type, + ctx, error); + custom_error = true; + break; + case RTE_FLOW_ACTION_TYPE_PF: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF, + bundle->actions_mask); + rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec); + break; + case RTE_FLOW_ACTION_TYPE_VF: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF, + bundle->actions_mask); + rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec); + break; + case RTE_FLOW_ACTION_TYPE_PORT_ID: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID, + bundle->actions_mask); + rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec); + break; + case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR, + bundle->actions_mask); + + switch_port_type_mask = 1U << SFC_MAE_SWITCH_PORT_INDEPENDENT; + + if (flow->internal) { + switch_port_type_mask |= + 1U << SFC_MAE_SWITCH_PORT_REPRESENTOR; + } + + rc = sfc_mae_rule_parse_action_port_representor(sa, + action->conf, switch_port_type_mask, spec); + break; + case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, + bundle->actions_mask); + rc = sfc_mae_rule_parse_action_represented_port(sa, + action->conf, spec); + break; + case RTE_FLOW_ACTION_TYPE_DROP: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP, + bundle->actions_mask); + rc = efx_mae_action_set_populate_drop(spec); + break; + default: + SFC_ASSERT(B_FALSE); + break; + } + + return sfc_mae_rule_parse_action_rc(sa, bundle, action, error, + rc, custom_error); +} + +static int +sfc_mae_rule_parse_action(struct sfc_adapter *sa, + const struct rte_flow_action *action, + struct rte_flow *flow, bool ct, + struct sfc_mae_actions_bundle *bundle, + struct rte_flow_error *error) +{ + struct sfc_flow_spec_mae *spec_mae = &flow->spec.mae; + const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule; + efx_counter_type_t mae_counter_type = EFX_COUNTER_TYPE_ACTION; + const uint64_t rx_metadata = sa->negotiated_rx_metadata; + struct sfc_mae_counter **counterp; + bool non_replayable_found = true; + struct sfc_mae *mae = &sa->mae; + struct sfc_mae_aset_ctx *ctx; + efx_mae_actions_t *spec_ptr; + bool custom_error = B_FALSE; + efx_mae_actions_t *spec; + int rc = 0; + + /* Check the number of complete action set contexts. */ + if (mae->nb_bounce_asets > (EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES - 1)) { + return sfc_mae_rule_parse_action_rc(sa, bundle, action, error, + ENOSPC, custom_error); + } + + ctx = &mae->bounce_aset_ctxs[mae->nb_bounce_asets]; + counterp = &ctx->counter; + spec = ctx->spec; + spec_ptr = spec; + + if (ct) { + mae_counter_type = EFX_COUNTER_TYPE_CONNTRACK; + counterp = &spec_mae->ct_counter; + spec_ptr = NULL; + } + + if (mae->nb_bounce_asets != 0 || ctx->fate_set) { + /* + * When at least one delivery action has been encountered, + * non-replayable actions (packet edits, for instance) + * will be turned down. 
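	 *
	 * Illustrative example (not from this patch): in a transfer flow with
	 * "actions represented_port A / represented_port B / end", the second
	 * fate action takes this path, the first action set is replayed, and
	 * the rule ends up with two action sets ({deliver to A}, {deliver to B})
	 * referenced through a single action set list, i.e. port mirroring.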
+ */ + return sfc_mae_rule_parse_action_replayable(sa, flow, bundle, + action, ctx, error); + } + + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP, + bundle->actions_mask); + if (outer_rule == NULL || + outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN) + rc = EINVAL; + else rc = efx_mae_action_set_populate_decap(spec); break; case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: @@ -4447,10 +5023,18 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa, case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, bundle->actions_mask); - rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae, - action->conf, + + /* Cleanup after previous encap. header bounce buffer usage. */ + sfc_mae_bounce_eh_invalidate(&mae->bounce_eh); + + rc = sfc_mae_rule_parse_action_vxlan_encap(mae, action->conf, spec, error); - custom_error = B_TRUE; + if (rc == 0) { + rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, + &ctx->encap_header); + } else { + custom_error = true; + } break; case RTE_FLOW_ACTION_TYPE_COUNT: SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT, @@ -4462,9 +5046,13 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa, case RTE_FLOW_ACTION_TYPE_INDIRECT: SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_INDIRECT, bundle->actions_mask); - rc = sfc_mae_rule_parse_action_indirect(sa, action->conf, + rc = sfc_mae_rule_parse_action_indirect(sa, false, action->conf, spec_mae->ft_rule_type, ctx, error); + if (rc == EEXIST) { + /* Handle the action as a replayable one below. */ + non_replayable_found = false; + } custom_error = B_TRUE; break; case RTE_FLOW_ACTION_TYPE_FLAG: @@ -4495,46 +5083,6 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa, custom_error = B_TRUE; } break; - case RTE_FLOW_ACTION_TYPE_PF: - SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF, - bundle->actions_mask); - rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec); - break; - case RTE_FLOW_ACTION_TYPE_VF: - SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF, - bundle->actions_mask); - rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec); - break; - case RTE_FLOW_ACTION_TYPE_PORT_ID: - SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID, - bundle->actions_mask); - rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec); - break; - case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: - SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR, - bundle->actions_mask); - - switch_port_type_mask = 1U << SFC_MAE_SWITCH_PORT_INDEPENDENT; - - if (flow->internal) { - switch_port_type_mask |= - 1U << SFC_MAE_SWITCH_PORT_REPRESENTOR; - } - - rc = sfc_mae_rule_parse_action_port_representor(sa, - action->conf, switch_port_type_mask, spec); - break; - case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: - SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, - bundle->actions_mask); - rc = sfc_mae_rule_parse_action_represented_port(sa, - action->conf, spec); - break; - case RTE_FLOW_ACTION_TYPE_DROP: - SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP, - bundle->actions_mask); - rc = efx_mae_action_set_populate_drop(spec); - break; case RTE_FLOW_ACTION_TYPE_JUMP: if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) { /* Workaround. 
See sfc_flow_parse_rte_to_mae() */ @@ -4542,27 +5090,16 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa, } /* FALLTHROUGH */ default: - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "Unsupported action"); + non_replayable_found = false; } - if (rc == 0) { - bundle->actions_mask |= (1ULL << action->type); - } else if (!custom_error) { - if (action->type < RTE_DIM(action_names)) { - const char *action_name = action_names[action->type]; - - if (action_name != NULL) { - sfc_err(sa, "action %s was rejected: %s", - action_name, strerror(rc)); - } - } - rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION, - NULL, "Failed to request the action"); + if (non_replayable_found) { + return sfc_mae_rule_parse_action_rc(sa, bundle, action, error, + rc, custom_error); } - return rc; + return sfc_mae_rule_parse_action_replayable(sa, flow, bundle, + action, ctx, error); } static void @@ -4588,6 +5125,78 @@ sfc_mae_process_encap_header(struct sfc_adapter *sa, return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp); } +static int +sfc_mae_rule_process_replay(struct sfc_adapter *sa, + struct sfc_mae_action_rule_ctx *action_rule_ctx) +{ + struct sfc_mae_action_set *base_aset; + struct sfc_mae_action_set **asetp; + struct sfc_mae *mae = &sa->mae; + struct sfc_mae_aset_ctx *ctx; + unsigned int i; + unsigned int j; + int rc; + + if (mae->nb_bounce_asets == 1) + return 0; + + mae->bounce_aset_ptrs[0] = action_rule_ctx->action_set; + base_aset = mae->bounce_aset_ptrs[0]; + + for (i = 1; i < mae->nb_bounce_asets; ++i) { + asetp = &mae->bounce_aset_ptrs[i]; + ctx = &mae->bounce_aset_ctxs[i]; + + *asetp = sfc_mae_action_set_attach(sa, ctx); + if (*asetp != NULL) { + efx_mae_action_set_spec_fini(sa->nic, ctx->spec); + sfc_mae_counter_del(sa, ctx->counter); + continue; + } + + rc = sfc_mae_action_set_add(sa, ctx, asetp); + if (rc != 0) + goto fail_action_set_add; + + if (base_aset->encap_header != NULL) + ++(base_aset->encap_header->refcnt); + + if (base_aset->dst_mac_addr != NULL) + ++(base_aset->dst_mac_addr->refcnt); + + if (base_aset->src_mac_addr != NULL) + ++(base_aset->src_mac_addr->refcnt); + } + + action_rule_ctx->action_set_list = sfc_mae_action_set_list_attach(sa); + if (action_rule_ctx->action_set_list != NULL) { + for (i = 0; i < mae->nb_bounce_asets; ++i) + sfc_mae_action_set_del(sa, mae->bounce_aset_ptrs[i]); + } else { + rc = sfc_mae_action_set_list_add(sa, + &action_rule_ctx->action_set_list); + if (rc != 0) + goto fail_action_set_list_add; + } + + action_rule_ctx->action_set = NULL; + + return 0; + +fail_action_set_list_add: +fail_action_set_add: + for (j = i; j < mae->nb_bounce_asets; ++j) { + ctx = &mae->bounce_aset_ctxs[j]; + efx_mae_action_set_spec_fini(sa->nic, ctx->spec); + sfc_mae_counter_del(sa, ctx->counter); + } + + while (--i > 0) + sfc_mae_action_set_del(sa, mae->bounce_aset_ptrs[i]); + + return rc; +} + static int sfc_mae_rule_parse_actions(struct sfc_adapter *sa, const struct rte_flow_action actions[], @@ -4599,8 +5208,9 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, struct sfc_mae_actions_bundle bundle = {0}; bool ct = (action_rule_ctx->ct_mark != 0); const struct rte_flow_action *action; - struct sfc_mae_aset_ctx ctx = {0}; + struct sfc_mae_aset_ctx *last_ctx; struct sfc_mae *mae = &sa->mae; + struct sfc_mae_aset_ctx *ctx; int rc; rte_errno = 0; @@ -4611,7 +5221,18 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, "NULL actions"); } - rc = efx_mae_action_set_spec_init(sa->nic, &ctx.spec); + /* + * Cleanup after action 
parsing of the previous flow. + * + * This particular variable always points at the + * 1st (base) action set context, which can hold + * both non-replayable and replayable actions. + */ + ctx = &mae->bounce_aset_ctxs[0]; + memset(ctx, 0, sizeof(*ctx)); + mae->nb_bounce_asets = 0; + + rc = efx_mae_action_set_spec_init(sa->nic, &ctx->spec); if (rc != 0) goto fail_action_set_spec_init; @@ -4619,7 +5240,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, bool have_user_action_count = false; /* TUNNEL rules don't decapsulate packets. SWITCH rules do. */ - rc = efx_mae_action_set_populate_decap(ctx.spec); + rc = efx_mae_action_set_populate_decap(ctx->spec); if (rc != 0) goto fail_enforce_ft_decap; @@ -4639,63 +5260,62 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, * packets hitting this rule contribute to the tunnel's * total number of hits. See sfc_mae_counter_get(). */ - rc = efx_mae_action_set_populate_count(ctx.spec); + rc = efx_mae_action_set_populate_count(ctx->spec); if (rc != 0) goto fail_enforce_ft_count; - rc = sfc_mae_counter_add(sa, NULL, &ctx.counter); + rc = sfc_mae_counter_add(sa, NULL, &ctx->counter); if (rc != 0) goto fail_enforce_ft_count; } } - /* Cleanup after previous encap. header bounce buffer usage. */ - sfc_mae_bounce_eh_invalidate(&mae->bounce_eh); - for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) { - rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae, - ctx.spec, ct, error); - if (rc != 0) - goto fail_rule_parse_action; + if (mae->nb_bounce_asets == 0) { + rc = sfc_mae_actions_bundle_sync(action, &bundle, + spec_mae, ctx->spec, + ct, error); + if (rc != 0) + goto fail_rule_parse_action; + } rc = sfc_mae_rule_parse_action(sa, action, flow, ct, - &bundle, &ctx, error); + &bundle, error); if (rc != 0) goto fail_rule_parse_action; } - rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae, - ctx.spec, ct, error); - if (rc != 0) - goto fail_rule_parse_action; - - rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, - &ctx.encap_header); - if (rc != 0) - goto fail_process_encap_header; + if (mae->nb_bounce_asets == 0) { + rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae, + ctx->spec, ct, error); + if (rc != 0) + goto fail_rule_parse_action; + } switch (spec_mae->ft_rule_type) { case SFC_FT_RULE_NONE: break; case SFC_FT_RULE_TUNNEL: /* Workaround. See sfc_flow_parse_rte_to_mae() */ - rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, ctx.spec); + rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, ctx->spec); if (rc != 0) goto fail_workaround_tunnel_delivery; - if (ctx.counter != NULL) - (ctx.counter)->ft_ctx = spec_mae->ft_ctx; + if (ctx->counter != NULL) + (ctx->counter)->ft_ctx = spec_mae->ft_ctx; + + ctx->fate_set = true; break; case SFC_FT_RULE_SWITCH: /* * Packets that go to the rule's AR have FT mark set (from * the TUNNEL rule OR's RECIRC_ID). Reset the mark to zero. */ - efx_mae_action_set_populate_mark_reset(ctx.spec); + efx_mae_action_set_populate_mark_reset(ctx->spec); - if (ctx.counter != NULL) { - (ctx.counter)->ft_switch_hit_counter = + if (ctx->counter != NULL) { + (ctx->counter)->ft_switch_hit_counter = &spec_mae->ft_ctx->switch_hit_counter; } else if (sfc_mae_counter_stream_enabled(sa)) { SFC_ASSERT(ct); @@ -4708,48 +5328,53 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, SFC_ASSERT(B_FALSE); } - /* - * A DPDK flow entry must specify a fate action, which the parser - * converts into a DELIVER action in a libefx action set. An - * attempt to replace the action in the action set should - * fail. 
If it succeeds then report an error, as the - * parsed flow entry did not contain a fate action. - */ - rc = efx_mae_action_set_populate_drop(ctx.spec); - if (rc == 0) { + SFC_ASSERT(mae->nb_bounce_asets < EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES); + last_ctx = &mae->bounce_aset_ctxs[mae->nb_bounce_asets]; + ++(mae->nb_bounce_asets); + + if (!last_ctx->fate_set) { rc = rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "no fate action found"); goto fail_check_fate_action; } - action_rule_ctx->action_set = sfc_mae_action_set_attach(sa, &ctx); + action_rule_ctx->action_set = sfc_mae_action_set_attach(sa, ctx); if (action_rule_ctx->action_set != NULL) { - sfc_mae_counter_del(sa, ctx.counter); - sfc_mae_mac_addr_del(sa, ctx.src_mac); - sfc_mae_mac_addr_del(sa, ctx.dst_mac); - sfc_mae_encap_header_del(sa, ctx.encap_header); - efx_mae_action_set_spec_fini(sa->nic, ctx.spec); - return 0; + sfc_mae_counter_del(sa, ctx->counter); + sfc_mae_mac_addr_del(sa, ctx->src_mac); + sfc_mae_mac_addr_del(sa, ctx->dst_mac); + sfc_mae_encap_header_del(sa, ctx->encap_header); + efx_mae_action_set_spec_fini(sa->nic, ctx->spec); + } else { + rc = sfc_mae_action_set_add(sa, ctx, + &action_rule_ctx->action_set); + if (rc != 0) + goto fail_action_set_add; } - rc = sfc_mae_action_set_add(sa, &ctx, &action_rule_ctx->action_set); + memset(ctx, 0, sizeof(*ctx)); + + rc = sfc_mae_rule_process_replay(sa, action_rule_ctx); if (rc != 0) - goto fail_action_set_add; + goto fail_rule_parse_replay; return 0; +fail_rule_parse_replay: + sfc_mae_action_set_del(sa, action_rule_ctx->action_set); + fail_action_set_add: fail_check_fate_action: fail_workaround_tunnel_delivery: - sfc_mae_encap_header_del(sa, ctx.encap_header); - -fail_process_encap_header: fail_rule_parse_action: - sfc_mae_counter_del(sa, ctx.counter); - sfc_mae_mac_addr_del(sa, ctx.src_mac); - sfc_mae_mac_addr_del(sa, ctx.dst_mac); - efx_mae_action_set_spec_fini(sa->nic, ctx.spec); + sfc_mae_encap_header_del(sa, ctx->encap_header); + sfc_mae_counter_del(sa, ctx->counter); + sfc_mae_mac_addr_del(sa, ctx->src_mac); + sfc_mae_mac_addr_del(sa, ctx->dst_mac); + + if (ctx->spec != NULL) + efx_mae_action_set_spec_fini(sa->nic, ctx->spec); fail_enforce_ft_count: fail_enforce_ft_decap: @@ -4806,6 +5431,7 @@ sfc_mae_rule_parse(struct sfc_adapter *sa, const struct rte_flow_item pattern[], error); if (rc == 0) { efx_mae_match_spec_fini(sa->nic, ctx.match_spec); + sfc_mae_action_set_list_del(sa, ctx.action_set_list); sfc_mae_action_set_del(sa, ctx.action_set); sfc_mae_outer_rule_del(sa, ctx.outer_rule); } else if (rc == -ENOENT) { @@ -4833,6 +5459,7 @@ sfc_mae_rule_parse(struct sfc_adapter *sa, const struct rte_flow_item pattern[], if (ctx.match_spec != NULL) efx_mae_match_spec_fini(sa->nic, ctx.match_spec); + sfc_mae_action_set_list_del(sa, ctx.action_set_list); sfc_mae_action_set_del(sa, ctx.action_set); sfc_mae_outer_rule_del(sa, ctx.outer_rule); @@ -5051,6 +5678,7 @@ sfc_mae_query_counter(struct sfc_adapter *sa, const struct rte_flow_action_count *conf = action->conf; struct sfc_mae_counter *counters[1 /* action rule counter */ + 1 /* conntrack counter */]; + struct sfc_mae_counter *counter; unsigned int i; int rc; @@ -5068,7 +5696,7 @@ sfc_mae_query_counter(struct sfc_adapter *sa, counters[1] = spec->ct_counter; for (i = 0; i < RTE_DIM(counters); ++i) { - struct sfc_mae_counter *counter = counters[i]; + counter = counters[i]; if (counter == NULL) continue; @@ -5086,6 +5714,29 @@ sfc_mae_query_counter(struct sfc_adapter *sa, } } + if (action_rule == NULL || 
action_rule->action_set_list == NULL) + goto exit; + + for (i = 0; i < action_rule->action_set_list->nb_action_sets; ++i) { + counter = action_rule->action_set_list->action_sets[i]->counter; + + if (counter == NULL || counter->indirect) + continue; + + if (conf == NULL || + (counter->rte_id_valid && conf->id == counter->rte_id)) { + rc = sfc_mae_counter_get(sa, counter, data); + if (rc != 0) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Queried flow rule counter action is invalid"); + } + + return 0; + } + } + +exit: return rte_flow_error_set(error, ENOENT, RTE_FLOW_ERROR_TYPE_ACTION, action, "no such flow rule action or such count ID"); @@ -5182,12 +5833,31 @@ sfc_mae_indir_action_create(struct sfc_adapter *sa, struct rte_flow_action_handle *handle, struct rte_flow_error *error) { + struct sfc_mae *mae = &sa->mae; + bool custom_error = false; int ret; SFC_ASSERT(sfc_adapter_is_locked(sa)); SFC_ASSERT(handle != NULL); switch (action->type) { + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + /* Cleanup after previous encap. header bounce buffer usage. */ + sfc_mae_bounce_eh_invalidate(&mae->bounce_eh); + + ret = sfc_mae_rule_parse_action_vxlan_encap(mae, action->conf, + NULL, error); + if (ret != 0) { + custom_error = true; + break; + } + + ret = sfc_mae_encap_header_add(sa, &mae->bounce_eh, + &handle->encap_header); + if (ret == 0) + handle->encap_header->indirect = true; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: ret = sfc_mae_rule_parse_action_count(sa, action->conf, EFX_COUNTER_TYPE_ACTION, @@ -5199,6 +5869,9 @@ sfc_mae_indir_action_create(struct sfc_adapter *sa, ret = ENOTSUP; } + if (custom_error) + return ret; + if (ret != 0) { return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -5219,6 +5892,12 @@ sfc_mae_indir_action_destroy(struct sfc_adapter *sa, SFC_ASSERT(handle != NULL); switch (handle->type) { + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + if (handle->encap_header->refcnt != 1) + goto fail; + + sfc_mae_encap_header_del(sa, handle->encap_header); + break; case RTE_FLOW_ACTION_TYPE_COUNT: if (handle->counter->refcnt != 1) goto fail; @@ -5237,6 +5916,50 @@ sfc_mae_indir_action_destroy(struct sfc_adapter *sa, NULL, "indirect action is still in use"); } +int +sfc_mae_indir_action_update(struct sfc_adapter *sa, + struct rte_flow_action_handle *handle, + const void *update, struct rte_flow_error *error) +{ + const struct rte_flow_action *action = update; + struct sfc_mae *mae = &sa->mae; + bool custom_error = false; + int ret; + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + SFC_ASSERT(action != NULL); + SFC_ASSERT(handle != NULL); + + switch (handle->type) { + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + /* Cleanup after previous encap. header bounce buffer usage. 
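	 * (Illustrative note, not from this patch: applications create such a
	 * handle with rte_flow_action_handle_create() passing a VXLAN_ENCAP
	 * action, reference it from flows as an INDIRECT action, and may later
	 * call rte_flow_action_handle_update(); that request reaches
	 * sfc_mae_indir_action_update() and sfc_mae_encap_header_update() below,
	 * which rewrite the header even while flows using the handle are active.)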
*/ + sfc_mae_bounce_eh_invalidate(&mae->bounce_eh); + + ret = sfc_mae_rule_parse_action_vxlan_encap(mae, action->conf, + NULL, error); + if (ret != 0) { + custom_error = true; + break; + } + + ret = sfc_mae_encap_header_update(sa, handle->encap_header); + break; + default: + ret = ENOTSUP; + } + + if (custom_error) + return ret; + + if (ret != 0) { + return rte_flow_error_set(error, ret, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "failed to parse indirect action to mae object"); + } + + return 0; +} + int sfc_mae_indir_action_query(struct sfc_adapter *sa, const struct rte_flow_action_handle *handle, diff --git a/drivers/net/sfc/sfc_mae.h b/drivers/net/sfc/sfc_mae.h index 646d055ac1b..2bdf5eeec22 100644 --- a/drivers/net/sfc/sfc_mae.h +++ b/drivers/net/sfc/sfc_mae.h @@ -26,6 +26,7 @@ extern "C" { struct sfc_mae_fw_rsrc { unsigned int refcnt; union { + efx_mae_aset_list_id_t aset_list_id; efx_counter_t counter_id; efx_mae_aset_id_t aset_id; efx_mae_rule_id_t rule_id; @@ -59,6 +60,7 @@ TAILQ_HEAD(sfc_mae_mac_addrs, sfc_mae_mac_addr); struct sfc_mae_encap_header { TAILQ_ENTRY(sfc_mae_encap_header) entries; unsigned int refcnt; + bool indirect; uint8_t *buf; size_t size; efx_tunnel_protocol_t type; @@ -104,12 +106,27 @@ struct sfc_mae_action_set { TAILQ_HEAD(sfc_mae_action_sets, sfc_mae_action_set); +/** Action set list registry entry */ +struct sfc_mae_action_set_list { + TAILQ_ENTRY(sfc_mae_action_set_list) entries; + unsigned int refcnt; + unsigned int nb_action_sets; + struct sfc_mae_action_set **action_sets; + struct sfc_mae_fw_rsrc fw_rsrc; +}; + +TAILQ_HEAD(sfc_mae_action_set_lists, sfc_mae_action_set_list); + /** Action rule registry entry */ struct sfc_mae_action_rule { TAILQ_ENTRY(sfc_mae_action_rule) entries; uint32_t ct_mark; struct sfc_mae_outer_rule *outer_rule; + /* + * When action_set_list != NULL, action_set is NULL, and vice versa. + */ struct sfc_mae_action_set *action_set; + struct sfc_mae_action_set_list *action_set_list; efx_mae_match_spec_t *match_spec; struct sfc_mae_fw_rsrc fw_rsrc; unsigned int refcnt; @@ -196,13 +213,25 @@ struct sfc_mae_counter_registry { } service; struct { /** Counter thread ID */ - pthread_t id; + rte_thread_t id; /** The thread should keep running */ bool run; } thread; } polling; }; +/* Entry format for the action parsing bounce buffer */ +struct sfc_mae_aset_ctx { + struct sfc_mae_encap_header *encap_header; + struct sfc_mae_counter *counter; + struct sfc_mae_mac_addr *dst_mac; + struct sfc_mae_mac_addr *src_mac; + + bool fate_set; + + efx_mae_actions_t *spec; +}; + struct sfc_mae { /** Assigned switch domain identifier */ uint16_t switch_domain_id; @@ -224,10 +253,19 @@ struct sfc_mae { struct sfc_mae_mac_addrs mac_addrs; /** Action set registry */ struct sfc_mae_action_sets action_sets; + /** Action set list registry */ + struct sfc_mae_action_set_lists action_set_lists; /** Action rule registry */ struct sfc_mae_action_rules action_rules; /** Encap. 
header bounce buffer */ struct sfc_mae_bounce_eh bounce_eh; + /** + * Action parsing bounce buffers + */ + struct sfc_mae_action_set **bounce_aset_ptrs; + struct sfc_mae_aset_ctx *bounce_aset_ctxs; + efx_mae_aset_id_t *bounce_aset_ids; + unsigned int nb_bounce_asets; /** Flag indicating whether counter-only RxQ is running */ bool counter_rxq_running; /** Counter record registry */ @@ -420,6 +458,11 @@ int sfc_mae_indir_action_destroy(struct sfc_adapter *sa, const struct rte_flow_action_handle *handle, struct rte_flow_error *error); +int sfc_mae_indir_action_update(struct sfc_adapter *sa, + struct rte_flow_action_handle *handle, + const void *update, + struct rte_flow_error *error); + int sfc_mae_indir_action_query(struct sfc_adapter *sa, const struct rte_flow_action_handle *handle, void *data, struct rte_flow_error *error); diff --git a/drivers/net/sfc/sfc_mae_counter.c b/drivers/net/sfc/sfc_mae_counter.c index 79043ff7d75..ba172957194 100644 --- a/drivers/net/sfc/sfc_mae_counter.c +++ b/drivers/net/sfc/sfc_mae_counter.c @@ -490,7 +490,7 @@ sfc_mae_counter_service_routine(void *arg) return 0; } -static void * +static uint32_t sfc_mae_counter_thread(void *data) { struct sfc_adapter *sa = data; @@ -521,7 +521,7 @@ sfc_mae_counter_thread(void *data) } } - return NULL; + return 0; } static void @@ -687,7 +687,7 @@ sfc_mae_counter_thread_stop(struct sfc_adapter *sa) __atomic_store_n(&counter_registry->polling.thread.run, false, __ATOMIC_RELEASE); - rc = pthread_join(counter_registry->polling.thread.id, NULL); + rc = rte_thread_join(counter_registry->polling.thread.id, NULL); if (rc != 0) sfc_err(sa, "failed to join the MAE counter polling thread"); @@ -710,9 +710,8 @@ sfc_mae_counter_thread_spawn(struct sfc_adapter *sa, counter_registry->polling_mode = SFC_MAE_COUNTER_POLLING_THREAD; counter_registry->polling.thread.run = true; - rc = rte_ctrl_thread_create(&sa->mae.counter_registry.polling.thread.id, - "mae_counter_thread", NULL, - sfc_mae_counter_thread, sa); + rc = rte_thread_create_internal_control(&sa->mae.counter_registry.polling.thread.id, + "sfc-maecnt", sfc_mae_counter_thread, sa); return rc; } diff --git a/drivers/net/sfc/sfc_repr.c b/drivers/net/sfc/sfc_repr.c index 6c7727d5698..c2e5d4eb9e2 100644 --- a/drivers/net/sfc/sfc_repr.c +++ b/drivers/net/sfc/sfc_repr.c @@ -263,6 +263,7 @@ static int sfc_repr_dev_start(struct rte_eth_dev *dev) { struct sfc_repr *sr = sfc_repr_by_eth_dev(dev); + uint16_t i; int ret; sfcr_info(sr, "entry"); @@ -274,6 +275,11 @@ sfc_repr_dev_start(struct rte_eth_dev *dev) if (ret != 0) goto fail_start; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + sfcr_info(sr, "done"); return 0; @@ -338,6 +344,7 @@ static int sfc_repr_dev_stop(struct rte_eth_dev *dev) { struct sfc_repr *sr = sfc_repr_by_eth_dev(dev); + uint16_t i; int ret; sfcr_info(sr, "entry"); @@ -352,6 +359,11 @@ sfc_repr_dev_stop(struct rte_eth_dev *dev) sfc_repr_unlock(sr); + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + sfcr_info(sr, "done"); return 0; @@ -502,6 +514,7 @@ sfc_repr_dev_infos_get(struct rte_eth_dev *dev, dev_info->device = dev->device; + dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX; dev_info->max_rx_queues = SFC_REPR_RXQ_MAX; 
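/*
 * Editor's sketch, not part of the patch: the dev_start/dev_stop hunks in
 * this series (sfc_repr above; softnic, vhost, virtio and vmxnet3 further
 * below) all add the same bookkeeping loops that mark every Rx/Tx queue as
 * started or stopped. A minimal helper capturing that shared pattern could
 * look like the following; the function name is an assumption, and it relies
 * only on the generic ethdev driver data these drivers already use.
 */
static void
pmd_example_set_queue_states(struct rte_eth_dev *dev, uint8_t state)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = state;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = state;
}
/*
 * Usage sketch: pmd_example_set_queue_states(dev, RTE_ETH_QUEUE_STATE_STARTED)
 * at the end of dev_start, and RTE_ETH_QUEUE_STATE_STOPPED in dev_stop.
 */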
dev_info->max_tx_queues = SFC_REPR_TXQ_MAX; dev_info->default_rxconf.rx_drop_en = 1; diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c index bcf6664460a..1b90cf7a21e 100644 --- a/drivers/net/softnic/rte_eth_softnic.c +++ b/drivers/net/softnic/rte_eth_softnic.c @@ -134,6 +134,7 @@ pmd_dev_start(struct rte_eth_dev *dev) { struct pmd_internals *p = dev->data->dev_private; int status; + uint16_t i; /* Firmware */ status = softnic_cli_script_process(p, @@ -146,6 +147,11 @@ pmd_dev_start(struct rte_eth_dev *dev) /* Link UP */ dev->data->dev_link.link_status = RTE_ETH_LINK_UP; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; } @@ -153,6 +159,7 @@ static int pmd_dev_stop(struct rte_eth_dev *dev) { struct pmd_internals *p = dev->data->dev_private; + uint16_t i; /* Link DOWN */ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; @@ -163,6 +170,11 @@ pmd_dev_stop(struct rte_eth_dev *dev) softnic_softnic_swq_free_keep_rxq_txq(p); softnic_mempool_free(p); + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c index bf98f755599..b25a52655fa 100644 --- a/drivers/net/tap/rte_eth_tap.c +++ b/drivers/net/tap/rte_eth_tap.c @@ -2267,29 +2267,6 @@ set_remote_iface(const char *key __rte_unused, return 0; } -static int parse_user_mac(struct rte_ether_addr *user_mac, - const char *value) -{ - unsigned int index = 0; - char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL; - - if (user_mac == NULL || value == NULL) - return 0; - - strlcpy(mac_temp, value, sizeof(mac_temp)); - mac_byte = strtok(mac_temp, ":"); - - while ((mac_byte != NULL) && - (strlen(mac_byte) <= 2) && - (strlen(mac_byte) == strspn(mac_byte, - ETH_TAP_CMP_MAC_FMT))) { - user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16); - mac_byte = strtok(NULL, ":"); - } - - return index; -} - static int set_mac_type(const char *key __rte_unused, const char *value, @@ -2311,7 +2288,7 @@ set_mac_type(const char *key __rte_unused, goto success; } - if (parse_user_mac(user_mac, value) != 6) + if (rte_ether_unformat_addr(value, user_mac) < 0) goto error; success: TAP_LOG(DEBUG, "TAP user MAC param (%s)", value); diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index d942b542ea1..6bc231a1306 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -1499,6 +1499,19 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) return -EINVAL; } } + + /* + * When DCB/VT is off, maximum number of queues changes + */ + if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE) { + if (nb_tx_q > TXGBE_NONE_MODE_TX_NB_QUEUES) { + PMD_INIT_LOG(ERR, + "Neither VT nor DCB are enabled, " + "nb_tx_q > %d.", + TXGBE_NONE_MODE_TX_NB_QUEUES); + return -EINVAL; + } + } } return 0; } @@ -1916,7 +1929,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev) struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); if (hw->adapter_stopped) - return 0; + goto out; PMD_INIT_FUNC_TRACE(); @@ -1940,14 +1953,6 @@ txgbe_dev_stop(struct rte_eth_dev *dev) for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) vfinfo[vf].clear_to_send = false; - if (hw->phy.media_type == 
txgbe_media_type_copper) { - /* Turn off the copper */ - hw->phy.set_phy_power(hw, false); - } else { - /* Turn off the laser */ - hw->mac.disable_tx_laser(hw); - } - txgbe_dev_clear_queues(dev); /* Clear stored conf */ @@ -1978,6 +1983,16 @@ txgbe_dev_stop(struct rte_eth_dev *dev) dev->data->dev_started = 0; hw->dev_start = false; +out: + /* close phy to prevent reset in dev_close from restarting physical link */ + if (hw->phy.media_type == txgbe_media_type_copper) { + /* Turn off the copper */ + hw->phy.set_phy_power(hw, false); + } else { + /* Turn off the laser */ + hw->mac.disable_tx_laser(hw); + } + return 0; } @@ -2037,6 +2052,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + txgbe_pf_reset_hw(hw); ret = txgbe_dev_stop(dev); @@ -2896,9 +2914,9 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, * when there is no link thread running. */ intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; - if (rte_thread_create_control(&ad->link_thread_tid, - "txgbe-link-thread", NULL, - txgbe_dev_setup_link_thread_handler, dev) < 0) { + if (rte_thread_create_internal_control(&ad->link_thread_tid, + "txgbe-link", + txgbe_dev_setup_link_thread_handler, dev) < 0) { PMD_DRV_LOG(ERR, "Create link thread failed!"); __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST); } @@ -2944,6 +2962,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, break; } + /* Re configure MAC RX */ + if (hw->mac.type == txgbe_mac_raptor) + wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_PROMISC, + TXGBE_MACRXFLT_PROMISC); + return rte_eth_linkstatus_set(dev, &link); } diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index 6b296d6fd16..7feb45d0cf2 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -41,6 +41,7 @@ /*Default value of Max Rx Queue*/ #define TXGBE_MAX_RX_QUEUE_NUM 128 #define TXGBE_VMDQ_DCB_NB_QUEUES TXGBE_MAX_RX_QUEUE_NUM +#define TXGBE_NONE_MODE_TX_NB_QUEUES 64 #ifndef NBBY #define NBBY 8 /* number of bits in a byte */ diff --git a/drivers/net/txgbe/txgbe_ptypes.c b/drivers/net/txgbe/txgbe_ptypes.c index e1299d73637..c444d5d3f1f 100644 --- a/drivers/net/txgbe/txgbe_ptypes.c +++ b/drivers/net/txgbe/txgbe_ptypes.c @@ -320,8 +320,6 @@ txgbe_encode_ptype_tunnel(u32 ptype) ptid |= TXGBE_PTID_TUN_EI; break; case RTE_PTYPE_TUNNEL_GRE: - ptid |= TXGBE_PTID_TUN_EIG; - break; case RTE_PTYPE_TUNNEL_VXLAN: case RTE_PTYPE_TUNNEL_VXLAN_GPE: case RTE_PTYPE_TUNNEL_NVGRE: @@ -332,20 +330,6 @@ txgbe_encode_ptype_tunnel(u32 ptype) return ptid; } - switch (ptype & RTE_PTYPE_INNER_L2_MASK) { - case RTE_PTYPE_INNER_L2_ETHER: - ptid |= TXGBE_PTID_TUN_EIGM; - break; - case RTE_PTYPE_INNER_L2_ETHER_VLAN: - ptid |= TXGBE_PTID_TUN_EIGMV; - break; - case RTE_PTYPE_INNER_L2_ETHER_QINQ: - ptid |= TXGBE_PTID_TUN_EIGMV; - break; - default: - break; - } - switch (ptype & RTE_PTYPE_INNER_L3_MASK) { case RTE_PTYPE_INNER_L3_IPV4: case RTE_PTYPE_INNER_L3_IPV4_EXT: diff --git a/drivers/net/txgbe/txgbe_ptypes.h b/drivers/net/txgbe/txgbe_ptypes.h index fa6c347d53d..6fa8147f056 100644 --- a/drivers/net/txgbe/txgbe_ptypes.h +++ b/drivers/net/txgbe/txgbe_ptypes.h @@ -348,4 +348,9 @@ struct txgbe_nvgrehdr { __be32 tni; }; +struct txgbe_grehdr { + __be16 flags; + __be16 proto; +}; + #endif /* _TXGBE_PTYPE_H_ */ diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c index 427f8b82ac8..834ada886ac 100644 --- a/drivers/net/txgbe/txgbe_rxtx.c +++ b/drivers/net/txgbe/txgbe_rxtx.c @@ -572,7 
+572,6 @@ tx_desc_ol_flags_to_ptype(uint64_t oflags) ptype |= RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE; - ptype |= RTE_PTYPE_INNER_L2_ETHER; break; case RTE_MBUF_F_TX_TUNNEL_GENEVE: ptype |= RTE_PTYPE_L2_ETHER | @@ -705,22 +704,24 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf) static inline uint8_t txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt) { - uint64_t l2_none, l2_mac, l2_mac_vlan; + uint64_t l2_vxlan, l2_vxlan_mac, l2_vxlan_mac_vlan; + uint64_t l2_gre, l2_gre_mac, l2_gre_mac_vlan; uint8_t ptid = 0; - if ((tx_pkt->ol_flags & (RTE_MBUF_F_TX_TUNNEL_VXLAN | - RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE)) == 0) - return ptid; + l2_vxlan = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr); + l2_vxlan_mac = l2_vxlan + sizeof(struct rte_ether_hdr); + l2_vxlan_mac_vlan = l2_vxlan_mac + sizeof(struct rte_vlan_hdr); - l2_none = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr); - l2_mac = l2_none + sizeof(struct rte_ether_hdr); - l2_mac_vlan = l2_mac + sizeof(struct rte_vlan_hdr); + l2_gre = sizeof(struct txgbe_grehdr); + l2_gre_mac = l2_gre + sizeof(struct rte_ether_hdr); + l2_gre_mac_vlan = l2_gre_mac + sizeof(struct rte_vlan_hdr); - if (tx_pkt->l2_len == l2_none) + if (tx_pkt->l2_len == l2_vxlan || tx_pkt->l2_len == l2_gre) ptid = TXGBE_PTID_TUN_EIG; - else if (tx_pkt->l2_len == l2_mac) + else if (tx_pkt->l2_len == l2_vxlan_mac || tx_pkt->l2_len == l2_gre_mac) ptid = TXGBE_PTID_TUN_EIGM; - else if (tx_pkt->l2_len == l2_mac_vlan) + else if (tx_pkt->l2_len == l2_vxlan_mac_vlan || + tx_pkt->l2_len == l2_gre_mac_vlan) ptid = TXGBE_PTID_TUN_EIGMV; return ptid; @@ -2805,6 +2806,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) txq->ops->release_mbufs(txq); txq->ops->reset(txq); } + + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } for (i = 0; i < dev->data->nb_rx_queues; i++) { @@ -2814,6 +2817,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) txgbe_rx_queue_release_mbufs(rxq); txgbe_reset_rx_queue(adapter, rxq); } + + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } } @@ -5004,6 +5009,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA)); if (!poll_ms) PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i); + else + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; } for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; @@ -5018,6 +5025,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA)); if (!poll_ms) PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i); + else + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; rte_wmb(); wr32(hw, TXGBE_RXWP(i), rxq->nb_rx_desc - 1); } diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c index 8d37ec97754..21bbb008e03 100644 --- a/drivers/net/vhost/rte_eth_vhost.c +++ b/drivers/net/vhost/rte_eth_vhost.c @@ -1135,6 +1135,7 @@ eth_dev_start(struct rte_eth_dev *eth_dev) { struct pmd_internal *internal = eth_dev->data->dev_private; struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf; + uint16_t i; eth_vhost_uninstall_intr(eth_dev); if (dev_conf->intr_conf.rxq && eth_vhost_install_intr(eth_dev) < 0) { @@ -1150,6 +1151,11 @@ eth_dev_start(struct rte_eth_dev *eth_dev) rte_atomic32_set(&internal->started, 1); update_queuing_status(eth_dev, false); + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) + eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + 
eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; } @@ -1157,11 +1163,17 @@ static int eth_dev_stop(struct rte_eth_dev *dev) { struct pmd_internal *internal = dev->data->dev_private; + uint16_t i; dev->data->dev_started = 0; rte_atomic32_set(&internal->started, 0); update_queuing_status(dev, true); + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } @@ -1299,6 +1311,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) unsigned i; unsigned long rx_total = 0, tx_total = 0; unsigned long rx_total_bytes = 0, tx_total_bytes = 0; + unsigned long tx_total_errors = 0; struct vhost_queue *vq; for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && @@ -1323,12 +1336,15 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->q_obytes[i] = vq->stats.bytes; tx_total_bytes += stats->q_obytes[i]; + + tx_total_errors += vq->stats.missed_pkts; } stats->ipackets = rx_total; stats->opackets = tx_total; stats->ibytes = rx_total_bytes; stats->obytes = tx_total_bytes; + stats->oerrors = tx_total_errors; return 0; } diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c index 2c23f1c00e2..3ab56ef769c 100644 --- a/drivers/net/virtio/virtio_ethdev.c +++ b/drivers/net/virtio/virtio_ethdev.c @@ -2417,6 +2417,11 @@ virtio_dev_start(struct rte_eth_dev *dev) set_rxtx_funcs(dev); hw->started = 1; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + /* Initialize Link state */ virtio_dev_link_update(dev, 0); @@ -2506,6 +2511,7 @@ virtio_dev_stop(struct rte_eth_dev *dev) struct virtio_hw *hw = dev->data->dev_private; struct rte_eth_link link; struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf; + uint16_t i; PMD_INIT_LOG(DEBUG, "stop"); dev->data->dev_started = 0; @@ -2533,6 +2539,11 @@ virtio_dev_stop(struct rte_eth_dev *dev) out_unlock: rte_spinlock_unlock(&hw->state_lock); + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c index 29eb739b040..90bbb53502c 100644 --- a/drivers/net/virtio/virtio_pci.c +++ b/drivers/net/virtio/virtio_pci.c @@ -15,15 +15,6 @@ #include "virtio_logs.h" #include "virtqueue.h" -/* - * Following macros are derived from linux/pci_regs.h, however, - * we can't simply include that header here, as there is no such - * file for non-Linux platform. - */ -#define PCI_CAPABILITY_LIST 0x34 -#define PCI_CAP_ID_VNDR 0x09 -#define PCI_CAP_ID_MSIX 0x11 - /* * The remaining space is defined by each driver as the per-driver * configuration space. 
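For context on the two virtio_pci.c hunks that follow: the open-coded PCI capability walk is replaced with the generic bus helpers (rte_pci_find_capability() and friends). A minimal, illustrative sketch of MSI-X detection built on those helpers is given below; the function name is hypothetical and the two include lines are assumptions about where the helpers and RTE_PCI_* constants are declared, so treat this as a sketch rather than part of the patch.

#include <rte_pci.h>
#include <bus_pci_driver.h>

/* Illustrative sketch: report whether a PCI device exposes MSI-X and whether it is enabled. */
static int
example_msix_enabled(struct rte_pci_device *pci_dev)
{
	uint16_t flags;
	off_t pos;

	pos = rte_pci_find_capability(pci_dev, RTE_PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return -1; /* no MSI-X capability in the capability list */

	if (rte_pci_read_config(pci_dev, &flags, sizeof(flags),
			pos + RTE_PCI_MSIX_FLAGS) != sizeof(flags))
		return -1; /* failed to read the MSI-X message control word */

	return (flags & RTE_PCI_MSIX_FLAGS_ENABLE) ? 1 : 0;
}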
@@ -33,51 +24,19 @@ struct virtio_pci_internal virtio_pci_internal[RTE_MAX_ETHPORTS]; -#define PCI_MSIX_ENABLE 0x8000 - static enum virtio_msix_status vtpci_msix_detect(struct rte_pci_device *dev) { - uint8_t pos; - int ret; - - ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST); - if (ret != 1) { - PMD_INIT_LOG(DEBUG, - "failed to read pci capability list, ret %d", ret); - return VIRTIO_MSIX_NONE; - } - - while (pos) { - uint8_t cap[2]; - - ret = rte_pci_read_config(dev, cap, sizeof(cap), pos); - if (ret != sizeof(cap)) { - PMD_INIT_LOG(DEBUG, - "failed to read pci cap at pos: %x ret %d", - pos, ret); - break; - } - - if (cap[0] == PCI_CAP_ID_MSIX) { - uint16_t flags; - - ret = rte_pci_read_config(dev, &flags, sizeof(flags), - pos + sizeof(cap)); - if (ret != sizeof(flags)) { - PMD_INIT_LOG(DEBUG, - "failed to read pci cap at pos:" - " %x ret %d", pos + 2, ret); - break; - } - - if (flags & PCI_MSIX_ENABLE) - return VIRTIO_MSIX_ENABLED; - else - return VIRTIO_MSIX_DISABLED; - } + uint16_t flags; + off_t pos; - pos = cap[1]; + pos = rte_pci_find_capability(dev, RTE_PCI_CAP_ID_MSIX); + if (pos > 0 && rte_pci_read_config(dev, &flags, sizeof(flags), + pos + RTE_PCI_MSIX_FLAGS) == sizeof(flags)) { + if (flags & RTE_PCI_MSIX_FLAGS_ENABLE) + return VIRTIO_MSIX_ENABLED; + else + return VIRTIO_MSIX_DISABLED; } return VIRTIO_MSIX_NONE; @@ -623,8 +582,8 @@ static int virtio_read_caps(struct rte_pci_device *pci_dev, struct virtio_hw *hw) { struct virtio_pci_dev *dev = virtio_pci_get_dev(hw); - uint8_t pos; struct virtio_pci_cap cap; + off_t pos; int ret; if (rte_pci_map_device(pci_dev)) { @@ -632,72 +591,27 @@ virtio_read_caps(struct rte_pci_device *pci_dev, struct virtio_hw *hw) return -1; } - ret = rte_pci_read_config(pci_dev, &pos, 1, PCI_CAPABILITY_LIST); - if (ret != 1) { - PMD_INIT_LOG(DEBUG, - "failed to read pci capability list, ret %d", ret); - return -1; - } - - while (pos) { - ret = rte_pci_read_config(pci_dev, &cap, 2, pos); - if (ret != 2) { - PMD_INIT_LOG(DEBUG, - "failed to read pci cap at pos: %x ret %d", - pos, ret); - break; - } - - if (cap.cap_vndr == PCI_CAP_ID_MSIX) { - /* Transitional devices would also have this capability, - * that's why we also check if msix is enabled. - * 1st byte is cap ID; 2nd byte is the position of next - * cap; next two bytes are the flags. - */ - uint16_t flags; - - ret = rte_pci_read_config(pci_dev, &flags, sizeof(flags), - pos + 2); - if (ret != sizeof(flags)) { - PMD_INIT_LOG(DEBUG, - "failed to read pci cap at pos:" - " %x ret %d", pos + 2, ret); - break; - } - - if (flags & PCI_MSIX_ENABLE) - dev->msix_status = VIRTIO_MSIX_ENABLED; - else - dev->msix_status = VIRTIO_MSIX_DISABLED; - } - - if (cap.cap_vndr != PCI_CAP_ID_VNDR) { - PMD_INIT_LOG(DEBUG, - "[%2x] skipping non VNDR cap id: %02x", - pos, cap.cap_vndr); - goto next; - } + /* + * Transitional devices would also have this capability, + * that's why we also check if msix is enabled. 
+ */ + dev->msix_status = vtpci_msix_detect(pci_dev); - ret = rte_pci_read_config(pci_dev, &cap, sizeof(cap), pos); - if (ret != sizeof(cap)) { - PMD_INIT_LOG(DEBUG, - "failed to read pci cap at pos: %x ret %d", - pos, ret); + pos = rte_pci_find_capability(pci_dev, RTE_PCI_CAP_ID_VNDR); + while (pos > 0) { + if (rte_pci_read_config(pci_dev, &cap, sizeof(cap), pos) != sizeof(cap)) break; - } - PMD_INIT_LOG(DEBUG, "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u", - pos, cap.cfg_type, cap.bar, cap.offset, cap.length); + (unsigned int)pos, cap.cfg_type, cap.bar, cap.offset, cap.length); switch (cap.cfg_type) { case VIRTIO_PCI_CAP_COMMON_CFG: dev->common_cfg = get_cfg_addr(pci_dev, &cap); break; case VIRTIO_PCI_CAP_NOTIFY_CFG: - ret = rte_pci_read_config(pci_dev, - &dev->notify_off_multiplier, - 4, pos + sizeof(cap)); + ret = rte_pci_read_config(pci_dev, &dev->notify_off_multiplier, + 4, pos + sizeof(cap)); if (ret != 4) PMD_INIT_LOG(DEBUG, "failed to read notify_off_multiplier, ret %d", @@ -713,8 +627,7 @@ virtio_read_caps(struct rte_pci_device *pci_dev, struct virtio_hw *hw) break; } -next: - pos = cap.cap_next; + pos = rte_pci_find_next_capability(pci_dev, RTE_PCI_CAP_ID_VNDR, pos); } if (dev->common_cfg == NULL || dev->notify_base == NULL || diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c index e48ff3cca72..99b95194cdd 100644 --- a/drivers/net/virtio/virtio_rxtx.c +++ b/drivers/net/virtio/virtio_rxtx.c @@ -94,7 +94,7 @@ virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf) uint32_t bin; /* count zeros, and offset into correct bin */ - bin = (sizeof(s) * 8) - __builtin_clz(s) - 5; + bin = (sizeof(s) * 8) - rte_clz32(s) - 5; stats->size_bins[bin]++; } else { if (s < 64) diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c index 1d836f25303..6f419665f1f 100644 --- a/drivers/net/virtio/virtqueue.c +++ b/drivers/net/virtio/virtqueue.c @@ -469,9 +469,11 @@ virtqueue_alloc(struct virtio_hw *hw, uint16_t index, uint16_t num, int type, if (hw->use_va) { vq->vq_ring_mem = (uintptr_t)mz->addr; vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr); + vq->mbuf_addr_mask = UINTPTR_MAX; } else { vq->vq_ring_mem = mz->iova; vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova); + vq->mbuf_addr_mask = UINT64_MAX; } PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, vq->vq_ring_mem); diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h index 9d4aba11a31..c1cb941c431 100644 --- a/drivers/net/virtio/virtqueue.h +++ b/drivers/net/virtio/virtqueue.h @@ -114,17 +114,26 @@ virtqueue_store_flags_packed(struct vring_packed_desc *dp, #define VIRTQUEUE_MAX_NAME_SZ 32 +#ifdef RTE_ARCH_32 +#define VIRTIO_MBUF_ADDR_MASK(vq) ((vq)->mbuf_addr_mask) +#else +#define VIRTIO_MBUF_ADDR_MASK(vq) UINT64_MAX +#endif + /** * Return the IOVA (or virtual address in case of virtio-user) of mbuf * data buffer. * * The address is firstly casted to the word size (sizeof(uintptr_t)) - * before casting it to uint64_t. This is to make it work with different - * combination of word size (64 bit and 32 bit) and virtio device - * (virtio-pci and virtio-user). + * before casting it to uint64_t. It is then masked with the expected + * address length (64 bits for virtio-pci, word size for virtio-user). + * + * This is to make it work with different combination of word size (64 + * bit and 32 bit) and virtio device (virtio-pci and virtio-user). 
*/ #define VIRTIO_MBUF_ADDR(mb, vq) \ - ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset))) + ((*(uint64_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset)) & \ + VIRTIO_MBUF_ADDR_MASK(vq)) /** * Return the physical address (or virtual address in case of @@ -194,6 +203,7 @@ struct virtqueue { void *vq_ring_virt_mem; /**< linear address of vring*/ unsigned int vq_ring_size; uint16_t mbuf_addr_offset; + uint64_t mbuf_addr_mask; union { struct virtnet_rx rxq; diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c index 76e80e30251..e49191718ae 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -1048,6 +1048,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) { int ret; struct vmxnet3_hw *hw = dev->data->dev_private; + uint16_t i; PMD_INIT_FUNC_TRACE(); @@ -1151,6 +1152,11 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) */ __vmxnet3_dev_link_update(dev, 0); + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return VMXNET3_SUCCESS; } @@ -1163,6 +1169,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) struct rte_eth_link link; struct vmxnet3_hw *hw = dev->data->dev_private; struct rte_intr_handle *intr_handle = dev->intr_handle; + uint16_t i; int ret; PMD_INIT_FUNC_TRACE(); @@ -1218,6 +1225,11 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) hw->adapter_stopped = 1; dev->data->dev_started = 0; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } diff --git a/drivers/raw/cnxk_bphy/cnxk_bphy.c b/drivers/raw/cnxk_bphy/cnxk_bphy.c index d42cca649ce..15dbc4c1a63 100644 --- a/drivers/raw/cnxk_bphy/cnxk_bphy.c +++ b/drivers/raw/cnxk_bphy/cnxk_bphy.c @@ -359,10 +359,12 @@ bphy_rawdev_probe(struct rte_pci_driver *pci_drv, bphy_dev->mem.res2 = pci_dev->mem_resource[2]; bphy_dev->bphy.pci_dev = pci_dev; - ret = roc_bphy_dev_init(&bphy_dev->bphy); - if (ret) { - rte_rawdev_pmd_release(bphy_rawdev); - return ret; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + ret = roc_bphy_dev_init(&bphy_dev->bphy); + if (ret) { + rte_rawdev_pmd_release(bphy_rawdev); + return ret; + } } return 0; @@ -390,8 +392,10 @@ bphy_rawdev_remove(struct rte_pci_device *pci_dev) return -EINVAL; } - bphy_dev = (struct bphy_device *)rawdev->dev_private; - roc_bphy_dev_fini(&bphy_dev->bphy); + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + bphy_dev = (struct bphy_device *)rawdev->dev_private; + roc_bphy_dev_fini(&bphy_dev->bphy); + } return rte_rawdev_pmd_release(rawdev); } diff --git a/drivers/raw/ifpga/base/opae_osdep.h b/drivers/raw/ifpga/base/opae_osdep.h index 033b7e098c6..e91205f536c 100644 --- a/drivers/raw/ifpga/base/opae_osdep.h +++ b/drivers/raw/ifpga/base/opae_osdep.h @@ -7,6 +7,7 @@ #include #include +#include #ifdef RTE_LIB_EAL #include "osdep_rte/osdep_generic.h" diff --git a/drivers/raw/ifpga/ifpga_rawdev.c b/drivers/raw/ifpga/ifpga_rawdev.c index b9f6f432c8a..f89bd3f9e2c 100644 --- a/drivers/raw/ifpga/ifpga_rawdev.c +++ b/drivers/raw/ifpga/ifpga_rawdev.c @@ -74,7 +74,7 @@ static const struct rte_pci_id pci_ifpga_map[] = { static struct ifpga_rawdev ifpga_rawdevices[IFPGA_RAWDEV_NUM]; static int ifpga_monitor_refcnt; -static pthread_t ifpga_monitor_start_thread; +static rte_thread_t 
ifpga_monitor_start_thread; static struct ifpga_rawdev * ifpga_rawdev_allocate(struct rte_rawdev *rawdev); @@ -504,7 +504,7 @@ static int set_surprise_link_check_aer( return -EFAULT; } -static void * +static uint32_t ifpga_rawdev_gsd_handle(__rte_unused void *param) { struct ifpga_rawdev *ifpga_rdev; @@ -532,7 +532,7 @@ ifpga_rawdev_gsd_handle(__rte_unused void *param) rte_delay_us(100 * MS); } - return NULL; + return 0; } static int @@ -550,11 +550,10 @@ ifpga_monitor_start_func(struct ifpga_rawdev *dev) dev->poll_enabled = 1; if (!__atomic_fetch_add(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED)) { - ret = rte_ctrl_thread_create(&ifpga_monitor_start_thread, - "ifpga-monitor", NULL, - ifpga_rawdev_gsd_handle, NULL); + ret = rte_thread_create_internal_control(&ifpga_monitor_start_thread, + "ifpga-mon", ifpga_rawdev_gsd_handle, NULL); if (ret != 0) { - ifpga_monitor_start_thread = 0; + ifpga_monitor_start_thread.opaque_id = 0; IFPGA_RAWDEV_PMD_ERR( "Fail to create ifpga monitor thread"); return -1; @@ -575,12 +574,12 @@ ifpga_monitor_stop_func(struct ifpga_rawdev *dev) dev->poll_enabled = 0; if (!(__atomic_fetch_sub(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED) - 1) && - ifpga_monitor_start_thread) { - ret = pthread_cancel(ifpga_monitor_start_thread); + ifpga_monitor_start_thread.opaque_id != 0) { + ret = pthread_cancel((pthread_t)ifpga_monitor_start_thread.opaque_id); if (ret) IFPGA_RAWDEV_PMD_ERR("Can't cancel the thread"); - ret = pthread_join(ifpga_monitor_start_thread, NULL); + ret = rte_thread_join(ifpga_monitor_start_thread, NULL); if (ret) IFPGA_RAWDEV_PMD_ERR("Can't join the thread"); diff --git a/drivers/regex/cn9k/meson.build b/drivers/regex/cn9k/meson.build index 44b12b98455..b22275928ac 100644 --- a/drivers/regex/cn9k/meson.build +++ b/drivers/regex/cn9k/meson.build @@ -12,5 +12,5 @@ sources = files( 'cn9k_regexdev.c', ) -deps += ['bus_pci', 'regexdev'] +deps += ['bus_pci'] deps += ['common_cnxk', 'mempool_cnxk'] diff --git a/drivers/regex/meson.build b/drivers/regex/meson.build index 7ad55af8ca4..ff2a8fea89d 100644 --- a/drivers/regex/meson.build +++ b/drivers/regex/meson.build @@ -5,4 +5,4 @@ drivers = [ 'mlx5', 'cn9k', ] -std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc +std_deps = ['ethdev', 'kvargs', 'regexdev'] # 'ethdev' also pulls in mbuf, net, eal etc diff --git a/drivers/regex/mlx5/meson.build b/drivers/regex/mlx5/meson.build index 0f4ca46f441..6070b375012 100644 --- a/drivers/regex/mlx5/meson.build +++ b/drivers/regex/mlx5/meson.build @@ -7,7 +7,7 @@ if not is_linux subdir_done() endif -deps += ['common_mlx5', 'eal', 'regexdev'] +deps += ['common_mlx5', 'eal'] if not dpdk_conf.has('RTE_COMMON_MLX5') # avoid referencing undefined variables from common/mlx5 subdir_done() diff --git a/drivers/vdpa/ifc/base/ifcvf_osdep.h b/drivers/vdpa/ifc/base/ifcvf_osdep.h index 6444d7f72c4..dd2ff08f77c 100644 --- a/drivers/vdpa/ifc/base/ifcvf_osdep.h +++ b/drivers/vdpa/ifc/base/ifcvf_osdep.h @@ -6,7 +6,6 @@ #define _IFCVF_OSDEP_H_ #include -#include #include #include @@ -35,6 +34,9 @@ typedef struct rte_pci_device PCI_DEV; #define PCI_READ_CONFIG_DWORD(dev, val, where) \ rte_pci_read_config(dev, val, 4, where) +#define PCI_CAPABILITY_LIST RTE_PCI_CAPABILITY_LIST +#define PCI_CAP_ID_VNDR RTE_PCI_CAP_ID_VNDR + typedef uint8_t u8; typedef int8_t s8; typedef uint16_t u16; diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c index e4133568c1a..f034bd59ba5 100644 --- a/drivers/vdpa/ifc/ifcvf_vdpa.c +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c @@ -58,8 
+58,8 @@ struct ifcvf_internal { int vfio_container_fd; int vfio_group_fd; int vfio_dev_fd; - pthread_t tid; /* thread for notify relay */ - pthread_t intr_tid; /* thread for config space change interrupt relay */ + rte_thread_t tid; /* thread for notify relay */ + rte_thread_t intr_tid; /* thread for config space change interrupt relay */ int epfd; int csc_epfd; int vid; @@ -496,7 +496,7 @@ vdpa_disable_vfio_intr(struct ifcvf_internal *internal) return 0; } -static void * +static uint32_t notify_relay(void *arg) { int i, kickfd, epfd, nfds = 0; @@ -514,7 +514,7 @@ notify_relay(void *arg) epfd = epoll_create(IFCVF_MAX_QUEUES * 2); if (epfd < 0) { DRV_LOG(ERR, "failed to create epoll instance."); - return NULL; + return 1; } internal->epfd = epfd; @@ -527,7 +527,7 @@ notify_relay(void *arg) ev.data.u64 = qid | (uint64_t)vring.kickfd << 32; if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) { DRV_LOG(ERR, "epoll add error: %s", strerror(errno)); - return NULL; + return 1; } } @@ -537,7 +537,7 @@ notify_relay(void *arg) if (errno == EINTR) continue; DRV_LOG(ERR, "epoll_wait return fail\n"); - return NULL; + return 1; } for (i = 0; i < nfds; i++) { @@ -561,18 +561,18 @@ notify_relay(void *arg) } } - return NULL; + return 0; } static int setup_notify_relay(struct ifcvf_internal *internal) { - char name[THREAD_NAME_LEN]; + char name[RTE_THREAD_INTERNAL_NAME_SIZE]; int ret; - snprintf(name, sizeof(name), "ifc-notify-%d", internal->vid); - ret = rte_ctrl_thread_create(&internal->tid, name, NULL, notify_relay, - (void *)internal); + snprintf(name, sizeof(name), "ifc-noti%d", internal->vid); + ret = rte_thread_create_internal_control(&internal->tid, name, + notify_relay, internal); if (ret != 0) { DRV_LOG(ERR, "failed to create notify relay pthread."); return -1; @@ -584,13 +584,11 @@ setup_notify_relay(struct ifcvf_internal *internal) static int unset_notify_relay(struct ifcvf_internal *internal) { - void *status; - - if (internal->tid) { - pthread_cancel(internal->tid); - pthread_join(internal->tid, &status); + if (internal->tid.opaque_id != 0) { + pthread_cancel((pthread_t)internal->tid.opaque_id); + rte_thread_join(internal->tid, NULL); } - internal->tid = 0; + internal->tid.opaque_id = 0; if (internal->epfd >= 0) close(internal->epfd); @@ -610,7 +608,7 @@ virtio_interrupt_handler(struct ifcvf_internal *internal) DRV_LOG(ERR, "failed to notify the guest about configuration space change."); } -static void * +static uint32_t intr_relay(void *arg) { struct ifcvf_internal *internal = (struct ifcvf_internal *)arg; @@ -623,7 +621,7 @@ intr_relay(void *arg) csc_epfd = epoll_create(1); if (csc_epfd < 0) { DRV_LOG(ERR, "failed to create epoll for config space change."); - return NULL; + return 1; } ev.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP | EPOLLHUP; @@ -672,18 +670,18 @@ intr_relay(void *arg) close(csc_epfd); internal->csc_epfd = -1; - return NULL; + return 0; } static int setup_intr_relay(struct ifcvf_internal *internal) { - char name[THREAD_NAME_LEN]; + char name[RTE_THREAD_INTERNAL_NAME_SIZE]; int ret; - snprintf(name, sizeof(name), "ifc-intr-%d", internal->vid); - ret = rte_ctrl_thread_create(&internal->intr_tid, name, NULL, - intr_relay, (void *)internal); + snprintf(name, sizeof(name), "ifc-int%d", internal->vid); + ret = rte_thread_create_internal_control(&internal->intr_tid, name, + intr_relay, (void *)internal); if (ret) { DRV_LOG(ERR, "failed to create notify relay pthread."); return -1; @@ -694,13 +692,11 @@ setup_intr_relay(struct ifcvf_internal *internal) static void 
unset_intr_relay(struct ifcvf_internal *internal) { - void *status; - - if (internal->intr_tid) { - pthread_cancel(internal->intr_tid); - pthread_join(internal->intr_tid, &status); + if (internal->intr_tid.opaque_id != 0) { + pthread_cancel((pthread_t)internal->intr_tid.opaque_id); + rte_thread_join(internal->intr_tid, NULL); } - internal->intr_tid = 0; + internal->intr_tid.opaque_id = 0; if (internal->csc_epfd >= 0) close(internal->csc_epfd); @@ -922,7 +918,7 @@ update_used_ring(struct ifcvf_internal *internal, uint16_t qid) rte_vhost_vring_call(internal->vid, qid); } -static void * +static uint32_t vring_relay(void *arg) { int i, vid, epfd, fd, nfds; @@ -941,7 +937,7 @@ vring_relay(void *arg) epfd = epoll_create(IFCVF_MAX_QUEUES * 2); if (epfd < 0) { DRV_LOG(ERR, "failed to create epoll instance."); - return NULL; + return 1; } internal->epfd = epfd; @@ -952,7 +948,7 @@ vring_relay(void *arg) ev.data.u64 = qid << 1 | (uint64_t)vring.kickfd << 32; if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) { DRV_LOG(ERR, "epoll add error: %s", strerror(errno)); - return NULL; + return 1; } } @@ -966,7 +962,7 @@ vring_relay(void *arg) if (epoll_ctl(epfd, EPOLL_CTL_ADD, internal->intr_fd[qid], &ev) < 0) { DRV_LOG(ERR, "epoll add error: %s", strerror(errno)); - return NULL; + return 1; } update_used_ring(internal, qid); } @@ -982,7 +978,7 @@ vring_relay(void *arg) if (errno == EINTR) continue; DRV_LOG(ERR, "epoll_wait return fail."); - return NULL; + return 1; } for (i = 0; i < nfds; i++) { @@ -1010,18 +1006,18 @@ vring_relay(void *arg) } } - return NULL; + return 0; } static int setup_vring_relay(struct ifcvf_internal *internal) { - char name[THREAD_NAME_LEN]; + char name[RTE_THREAD_INTERNAL_NAME_SIZE]; int ret; - snprintf(name, sizeof(name), "ifc-vring-%d", internal->vid); - ret = rte_ctrl_thread_create(&internal->tid, name, NULL, vring_relay, - (void *)internal); + snprintf(name, sizeof(name), "ifc-ring%d", internal->vid); + ret = rte_thread_create_internal_control(&internal->tid, name, + vring_relay, internal); if (ret != 0) { DRV_LOG(ERR, "failed to create ring relay pthread."); return -1; @@ -1033,13 +1029,11 @@ setup_vring_relay(struct ifcvf_internal *internal) static int unset_vring_relay(struct ifcvf_internal *internal) { - void *status; - - if (internal->tid) { - pthread_cancel(internal->tid); - pthread_join(internal->tid, &status); + if (internal->tid.opaque_id != 0) { + pthread_cancel((pthread_t)internal->tid.opaque_id); + rte_thread_join(internal->tid, NULL); } - internal->tid = 0; + internal->tid.opaque_id = 0; if (internal->epfd >= 0) close(internal->epfd); diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c index f1737f82a8a..f900384aeb2 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa.c @@ -282,6 +282,7 @@ _internal_mlx5_vdpa_dev_close(struct mlx5_vdpa_priv *priv, int ret = 0; int vid = priv->vid; + mlx5_vdpa_virtq_unreg_intr_handle_all(priv); mlx5_vdpa_cqe_event_unset(priv); if (priv->state == MLX5_VDPA_STATE_CONFIGURED) { ret |= mlx5_vdpa_lm_log(priv); @@ -844,7 +845,7 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev, mlx5_vdpa_config_get(mkvlist, priv); if (priv->use_c_thread) { if (conf_thread_mng.initializer_priv == priv) - if (mlx5_vdpa_mult_threads_create(priv->event_core)) + if (mlx5_vdpa_mult_threads_create()) goto error; __atomic_fetch_add(&conf_thread_mng.refcnt, 1, __ATOMIC_RELAXED); diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h index dc4dfba5ed5..7b37c98e748 100644 --- 
a/drivers/vdpa/mlx5/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/mlx5_vdpa.h @@ -19,6 +19,7 @@ #endif #include #include +#include #include #include @@ -99,7 +100,7 @@ struct mlx5_vdpa_task { /* Generic mlx5_vdpa_c_thread information. */ struct mlx5_vdpa_c_thread { - pthread_t tid; + rte_thread_t tid; struct rte_ring *rng; pthread_cond_t c_cond; }; @@ -182,7 +183,7 @@ struct mlx5_vdpa_priv { rte_spinlock_t db_lock; pthread_mutex_t steer_update_lock; uint64_t no_traffic_counter; - pthread_t timer_tid; + rte_thread_t timer_tid; int event_mode; int event_core; /* Event thread cpu affinity core. */ uint32_t event_us; @@ -563,14 +564,11 @@ mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv); /** * Create configuration multi-threads resource * - * @param[in] cpu_core - * CPU core number to set configuration threads affinity to. - * * @return * 0 on success, a negative value otherwise. */ int -mlx5_vdpa_mult_threads_create(int cpu_core); +mlx5_vdpa_mult_threads_create(void); /** * Destroy configuration multi-threads resource diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c index 6e6624e5a3c..68ed841efc0 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c @@ -96,11 +96,10 @@ mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt, return false; } -static void * +static uint32_t mlx5_vdpa_c_thread_handle(void *arg) { struct mlx5_vdpa_conf_thread_mng *multhrd = arg; - pthread_t thread_id = pthread_self(); struct mlx5_vdpa_virtq *virtq; struct mlx5_vdpa_priv *priv; struct mlx5_vdpa_task task; @@ -112,10 +111,10 @@ mlx5_vdpa_c_thread_handle(void *arg) for (thrd_idx = 0; thrd_idx < multhrd->max_thrds; thrd_idx++) - if (multhrd->cthrd[thrd_idx].tid == thread_id) + if (rte_thread_equal(multhrd->cthrd[thrd_idx].tid, rte_thread_self())) break; if (thrd_idx >= multhrd->max_thrds) - return NULL; + return 1; rng = multhrd->cthrd[thrd_idx].rng; while (1) { task_num = mlx5_vdpa_c_thrd_ring_dequeue_bulk(rng, @@ -190,7 +189,6 @@ mlx5_vdpa_c_thread_handle(void *arg) pthread_mutex_unlock(&virtq->virtq_lock); break; case MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT: - mlx5_vdpa_virtq_unreg_intr_handle_all(priv); pthread_mutex_lock(&priv->steer_update_lock); mlx5_vdpa_steer_unset(priv); pthread_mutex_unlock(&priv->steer_update_lock); @@ -227,16 +225,17 @@ mlx5_vdpa_c_thread_handle(void *arg) __atomic_fetch_sub(task.remaining_cnt, 1, __ATOMIC_RELAXED); } - return NULL; + return 0; } static void mlx5_vdpa_c_thread_destroy(uint32_t thrd_idx, bool need_unlock) { - if (conf_thread_mng.cthrd[thrd_idx].tid) { - pthread_cancel(conf_thread_mng.cthrd[thrd_idx].tid); - pthread_join(conf_thread_mng.cthrd[thrd_idx].tid, NULL); - conf_thread_mng.cthrd[thrd_idx].tid = 0; + pthread_t *tid = (pthread_t *)&conf_thread_mng.cthrd[thrd_idx].tid.opaque_id; + if (*tid != 0) { + pthread_cancel(*tid); + rte_thread_join(conf_thread_mng.cthrd[thrd_idx].tid, NULL); + *tid = 0; if (need_unlock) pthread_mutex_init(&conf_thread_mng.cthrd_lock, NULL); } @@ -247,30 +246,14 @@ mlx5_vdpa_c_thread_destroy(uint32_t thrd_idx, bool need_unlock) } static int -mlx5_vdpa_c_thread_create(int cpu_core) +mlx5_vdpa_c_thread_create(void) { - const struct sched_param sp = { - .sched_priority = sched_get_priority_max(SCHED_RR), - }; - rte_cpuset_t cpuset; - pthread_attr_t attr; uint32_t thrd_idx; uint32_t ring_num; - char name[32]; + char name[RTE_RING_NAMESIZE]; int ret; pthread_mutex_lock(&conf_thread_mng.cthrd_lock); - pthread_attr_init(&attr); - ret = 
pthread_attr_setschedpolicy(&attr, SCHED_RR); - if (ret) { - DRV_LOG(ERR, "Failed to set thread sched policy = RR."); - goto c_thread_err; - } - ret = pthread_attr_setschedparam(&attr, &sp); - if (ret) { - DRV_LOG(ERR, "Failed to set thread priority."); - goto c_thread_err; - } ring_num = MLX5_VDPA_MAX_TASKS_PER_THRD / conf_thread_mng.max_thrds; if (!ring_num) { DRV_LOG(ERR, "Invalid ring number for thread."); @@ -291,35 +274,15 @@ mlx5_vdpa_c_thread_create(int cpu_core) thrd_idx); goto c_thread_err; } - ret = pthread_create(&conf_thread_mng.cthrd[thrd_idx].tid, - &attr, mlx5_vdpa_c_thread_handle, - (void *)&conf_thread_mng); + snprintf(name, RTE_THREAD_INTERNAL_NAME_SIZE, "vmlx5-c%d", thrd_idx); + ret = rte_thread_create_internal_control(&conf_thread_mng.cthrd[thrd_idx].tid, + name, + mlx5_vdpa_c_thread_handle, &conf_thread_mng); if (ret) { DRV_LOG(ERR, "Failed to create vdpa multi-threads %d.", thrd_idx); goto c_thread_err; } - CPU_ZERO(&cpuset); - if (cpu_core != -1) - CPU_SET(cpu_core, &cpuset); - else - cpuset = rte_lcore_cpuset(rte_get_main_lcore()); - ret = pthread_setaffinity_np( - conf_thread_mng.cthrd[thrd_idx].tid, - sizeof(cpuset), &cpuset); - if (ret) { - DRV_LOG(ERR, "Failed to set thread affinity for " - "vdpa multi-threads %d.", thrd_idx); - goto c_thread_err; - } - snprintf(name, sizeof(name), "vDPA-mthread-%d", thrd_idx); - ret = pthread_setname_np( - conf_thread_mng.cthrd[thrd_idx].tid, name); - if (ret) - DRV_LOG(ERR, "Failed to set vdpa multi-threads name %s.", - name); - else - DRV_LOG(DEBUG, "Thread name: %s.", name); pthread_cond_init(&conf_thread_mng.cthrd[thrd_idx].c_cond, NULL); } @@ -334,10 +297,10 @@ mlx5_vdpa_c_thread_create(int cpu_core) } int -mlx5_vdpa_mult_threads_create(int cpu_core) +mlx5_vdpa_mult_threads_create(void) { pthread_mutex_init(&conf_thread_mng.cthrd_lock, NULL); - if (mlx5_vdpa_c_thread_create(cpu_core)) { + if (mlx5_vdpa_c_thread_create()) { DRV_LOG(ERR, "Cannot create vDPA configuration threads."); mlx5_vdpa_mult_threads_destroy(false); return -1; diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c index f3d392c73d2..fa26471b183 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c @@ -284,7 +284,7 @@ mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused) return NULL; } -static void * +static uint32_t mlx5_vdpa_event_handle(void *arg) { struct mlx5_vdpa_priv *priv = arg; @@ -324,7 +324,7 @@ mlx5_vdpa_event_handle(void *arg) } mlx5_vdpa_timer_sleep(priv, max); } - return NULL; + return 0; case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT: do { virtq = mlx5_vdpa_event_wait(priv); @@ -336,9 +336,9 @@ mlx5_vdpa_event_handle(void *arg) pthread_mutex_unlock(&virtq->virtq_lock); } } while (1); - return NULL; + return 0; default: - return NULL; + return 0; } } @@ -503,54 +503,31 @@ int mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv) { int ret; - rte_cpuset_t cpuset; - pthread_attr_t *attrp = NULL; - pthread_attr_t attr; - char name[16]; - const struct sched_param sp = { - .sched_priority = sched_get_priority_max(SCHED_RR) - 1, - }; + rte_thread_attr_t attr; + char name[RTE_THREAD_INTERNAL_NAME_SIZE]; if (!priv->eventc) /* All virtqs are in poll mode. 
*/ return 0; - ret = pthread_attr_init(&attr); + ret = rte_thread_attr_init(&attr); if (ret != 0) { DRV_LOG(ERR, "Failed to initialize thread attributes"); goto out; } - attrp = &attr; - ret = pthread_attr_setschedpolicy(attrp, SCHED_RR); - if (ret) { - DRV_LOG(ERR, "Failed to set thread sched policy = RR."); - goto out; - } - ret = pthread_attr_setschedparam(attrp, &sp); - if (ret) { - DRV_LOG(ERR, "Failed to set thread priority."); - goto out; - } - ret = pthread_create(&priv->timer_tid, attrp, mlx5_vdpa_event_handle, - (void *)priv); - if (ret) { - DRV_LOG(ERR, "Failed to create timer thread."); - goto out; - } - CPU_ZERO(&cpuset); + attr.priority = RTE_THREAD_PRIORITY_REALTIME_CRITICAL; if (priv->event_core != -1) - CPU_SET(priv->event_core, &cpuset); + CPU_SET(priv->event_core, &attr.cpuset); else - cpuset = rte_lcore_cpuset(rte_get_main_lcore()); - ret = pthread_setaffinity_np(priv->timer_tid, sizeof(cpuset), &cpuset); - if (ret) { - DRV_LOG(ERR, "Failed to set thread affinity."); + attr.cpuset = rte_lcore_cpuset(rte_get_main_lcore()); + ret = rte_thread_create(&priv->timer_tid, + &attr, mlx5_vdpa_event_handle, priv); + if (ret != 0) { + DRV_LOG(ERR, "Failed to create timer thread."); goto out; } - snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid); - rte_thread_set_name((rte_thread_t){(uintptr_t)priv->timer_tid}, name); + snprintf(name, sizeof(name), "vmlx5-%d", priv->vid); + rte_thread_set_prefixed_name(priv->timer_tid, name); out: - if (attrp != NULL) - pthread_attr_destroy(attrp); if (ret != 0) return -1; return 0; @@ -560,19 +537,18 @@ void mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv) { struct mlx5_vdpa_virtq *virtq; - void *status; int i; - if (priv->timer_tid) { - pthread_cancel(priv->timer_tid); - pthread_join(priv->timer_tid, &status); + if (priv->timer_tid.opaque_id != 0) { + pthread_cancel((pthread_t)priv->timer_tid.opaque_id); + rte_thread_join(priv->timer_tid, NULL); /* The mutex may stay locked after event thread cancel, initiate it. */ for (i = 0; i < priv->nr_virtqs; i++) { virtq = &priv->virtqs[i]; pthread_mutex_init(&virtq->virtq_lock, NULL); } } - priv->timer_tid = 0; + priv->timer_tid.opaque_id = 0; } void diff --git a/drivers/vdpa/sfc/sfc_vdpa.c b/drivers/vdpa/sfc/sfc_vdpa.c index bd6ecde4507..9db87fcb7d4 100644 --- a/drivers/vdpa/sfc/sfc_vdpa.c +++ b/drivers/vdpa/sfc/sfc_vdpa.c @@ -4,6 +4,7 @@ #include #include +#include #include #include diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.c b/drivers/vdpa/sfc/sfc_vdpa_ops.c index f63af7d478e..00f9a4b04c7 100644 --- a/drivers/vdpa/sfc/sfc_vdpa_ops.c +++ b/drivers/vdpa/sfc/sfc_vdpa_ops.c @@ -567,7 +567,7 @@ sfc_vdpa_get_protocol_features(struct rte_vdpa_device *vdpa_dev, return 0; } -static void * +static uint32_t sfc_vdpa_notify_ctrl(void *arg) { struct sfc_vdpa_ops_data *ops_data; @@ -575,7 +575,7 @@ sfc_vdpa_notify_ctrl(void *arg) ops_data = arg; if (ops_data == NULL) - return NULL; + return 0; sfc_vdpa_adapter_lock(sfc_vdpa_adapter_by_dev_handle(ops_data->dev_handle)); @@ -588,7 +588,7 @@ sfc_vdpa_notify_ctrl(void *arg) sfc_vdpa_adapter_unlock(sfc_vdpa_adapter_by_dev_handle(ops_data->dev_handle)); - return NULL; + return 0; } static int @@ -603,8 +603,8 @@ sfc_vdpa_setup_notify_ctrl(struct sfc_vdpa_ops_data *ops_data) * dead lock scenario when multiple VFs are used in single vdpa * application and multiple VFs are passed to a single VM. 
*/ - ret = pthread_create(&ops_data->notify_tid, NULL, - sfc_vdpa_notify_ctrl, ops_data); + ret = rte_thread_create_internal_control(&ops_data->notify_tid, + "sfc-vdpa", sfc_vdpa_notify_ctrl, ops_data); if (ret != 0) { sfc_vdpa_err(ops_data->dev_handle, "failed to create notify_ctrl thread: %s", @@ -690,15 +690,14 @@ sfc_vdpa_dev_close(int vid) sfc_vdpa_adapter_lock(sfc_vdpa_adapter_by_dev_handle(ops_data->dev_handle)); if (ops_data->is_notify_thread_started == true) { - void *status; - ret = pthread_cancel(ops_data->notify_tid); + ret = pthread_cancel((pthread_t)ops_data->notify_tid.opaque_id); if (ret != 0) { sfc_vdpa_err(ops_data->dev_handle, "failed to cancel notify_ctrl thread: %s", rte_strerror(ret)); } - ret = pthread_join(ops_data->notify_tid, &status); + ret = rte_thread_join(ops_data->notify_tid, NULL); if (ret != 0) { sfc_vdpa_err(ops_data->dev_handle, "failed to join terminated notify_ctrl thread: %s", diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.h b/drivers/vdpa/sfc/sfc_vdpa_ops.h index 5c8e352de36..10ea2394347 100644 --- a/drivers/vdpa/sfc/sfc_vdpa_ops.h +++ b/drivers/vdpa/sfc/sfc_vdpa_ops.h @@ -6,6 +6,7 @@ #define _SFC_VDPA_OPS_H #include +#include #define SFC_VDPA_MAX_QUEUE_PAIRS 8 @@ -48,7 +49,7 @@ struct sfc_vdpa_ops_data { struct rte_vdpa_device *vdpa_dev; enum sfc_vdpa_context vdpa_context; enum sfc_vdpa_state state; - pthread_t notify_tid; + rte_thread_t notify_tid; bool is_notify_thread_started; uint64_t dev_features; diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py index 9b5aff85098..cb7e00ba34f 100644 --- a/dts/framework/config/__init__.py +++ b/dts/framework/config/__init__.py @@ -14,7 +14,7 @@ from enum import auto, unique from typing import Any, TypedDict, Union -import warlock # type: ignore +import warlock # type: ignore[import] import yaml from framework.settings import SETTINGS diff --git a/dts/framework/remote_session/remote/interactive_remote_session.py b/dts/framework/remote_session/remote/interactive_remote_session.py index e6383961838..9085a668e84 100644 --- a/dts/framework/remote_session/remote/interactive_remote_session.py +++ b/dts/framework/remote_session/remote/interactive_remote_session.py @@ -6,8 +6,8 @@ import socket import traceback -from paramiko import AutoAddPolicy, SSHClient, Transport # type: ignore -from paramiko.ssh_exception import ( # type: ignore +from paramiko import AutoAddPolicy, SSHClient, Transport # type: ignore[import] +from paramiko.ssh_exception import ( # type: ignore[import] AuthenticationException, BadHostKeyException, NoValidConnectionsError, diff --git a/dts/framework/remote_session/remote/interactive_shell.py b/dts/framework/remote_session/remote/interactive_shell.py index 5b94fc738f9..c24376b2a8d 100644 --- a/dts/framework/remote_session/remote/interactive_shell.py +++ b/dts/framework/remote_session/remote/interactive_shell.py @@ -16,7 +16,7 @@ from pathlib import PurePath from typing import Callable -from paramiko import Channel, SSHClient, channel # type: ignore +from paramiko import Channel, SSHClient, channel # type: ignore[import] from framework.logger import DTSLOG from framework.settings import SETTINGS diff --git a/dts/poetry.lock b/dts/poetry.lock index 8cb9920ec7e..f7b3b6d6027 100644 --- a/dts/poetry.lock +++ b/dts/poetry.lock @@ -1,24 +1,52 @@ +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. 
+ [[package]] name = "attrs" -version = "22.1.0" +version = "23.1.0" description = "Classes Without Boilerplate" -category = "main" optional = false -python-versions = ">=3.5" +python-versions = ">=3.7" +files = [ + {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, + {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, +] [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"] -docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] -tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "cloudpickle"] -tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "cloudpickle"] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] [[package]] name = "bcrypt" version = "4.0.1" description = "Modern password hashing for your software and your servers" -category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "bcrypt-4.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2"}, + {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535"}, + {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e"}, + {file = "bcrypt-4.0.1-cp36-abi3-win32.whl", hash = "sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab"}, + {file = "bcrypt-4.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c"}, + {file = 
"bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71"}, + {file = "bcrypt-4.0.1.tar.gz", hash = "sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd"}, +] [package.extras] tests = ["pytest (>=3.2.1,!=3.3.0)"] @@ -26,11 +54,24 @@ typecheck = ["mypy"] [[package]] name = "black" -version = "22.10.0" +version = "22.12.0" description = "The uncompromising code formatter." -category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, + {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, + {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, + {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, + {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, + {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, + {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, + {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, + {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, + {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, + {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, + {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, +] [package.dependencies] click = ">=8.0.0" @@ -49,20 +90,88 @@ uvloop = ["uvloop (>=0.15.2)"] name = "cffi" version = "1.15.1" description = 
"Foreign Function Interface for Python calling C code." -category = "main" optional = false python-versions = "*" +files = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] [package.dependencies] pycparser = "*" [[package]] name = "click" -version = "8.1.3" +version = "8.1.6" description = "Composable command line interface toolkit" -category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "click-8.1.6-py3-none-any.whl", hash = "sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5"}, + {file = "click-8.1.6.tar.gz", hash = "sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd"}, +] [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} @@ -71,38 +180,68 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal 
text." -category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] [[package]] name = "cryptography" -version = "40.0.2" +version = "41.0.2" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." -category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "cryptography-41.0.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711"}, + {file = "cryptography-41.0.2-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:079347de771f9282fbfe0e0236c716686950c19dee1b76240ab09ce1624d76d7"}, + {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:439c3cc4c0d42fa999b83ded80a9a1fb54d53c58d6e59234cfe97f241e6c781d"}, + {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f"}, + {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:84609ade00a6ec59a89729e87a503c6e36af98ddcd566d5f3be52e29ba993182"}, + {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:49c3222bb8f8e800aead2e376cbef687bc9e3cb9b58b29a261210456a7783d83"}, + {file = "cryptography-41.0.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:d73f419a56d74fef257955f51b18d046f3506270a5fd2ac5febbfa259d6c0fa5"}, + {file = "cryptography-41.0.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:2a034bf7d9ca894720f2ec1d8b7b5832d7e363571828037f9e0c4f18c1b58a58"}, + {file = "cryptography-41.0.2-cp37-abi3-win32.whl", hash = "sha256:d124682c7a23c9764e54ca9ab5b308b14b18eba02722b8659fb238546de83a76"}, + {file = "cryptography-41.0.2-cp37-abi3-win_amd64.whl", hash = "sha256:9c3fe6534d59d071ee82081ca3d71eed3210f76ebd0361798c74abc2bcf347d4"}, + {file = "cryptography-41.0.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a719399b99377b218dac6cf547b6ec54e6ef20207b6165126a280b0ce97e0d2a"}, + {file = "cryptography-41.0.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:182be4171f9332b6741ee818ec27daff9fb00349f706629f5cbf417bd50e66fd"}, + {file = "cryptography-41.0.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7a9a3bced53b7f09da251685224d6a260c3cb291768f54954e28f03ef14e3766"}, + {file = "cryptography-41.0.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee"}, + {file = "cryptography-41.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:674b669d5daa64206c38e507808aae49904c988fa0a71c935e7006a3e1e83831"}, + {file = "cryptography-41.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7af244b012711a26196450d34f483357e42aeddb04128885d95a69bd8b14b69b"}, + {file = "cryptography-41.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9b6d717393dbae53d4e52684ef4f022444fc1cce3c48c38cb74fca29e1f08eaa"}, + {file = "cryptography-41.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:192255f539d7a89f2102d07d7375b1e0a81f7478925b3bc2e0549ebf739dae0e"}, + {file = "cryptography-41.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14"}, + {file = "cryptography-41.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:b332cba64d99a70c1e0836902720887fb4529ea49ea7f5462cf6640e095e11d2"}, + {file = "cryptography-41.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9a6673c1828db6270b76b22cc696f40cde9043eb90373da5c2f8f2158957f42f"}, + {file = "cryptography-41.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:342f3767e25876751e14f8459ad85e77e660537ca0a066e10e75df9c9e9099f0"}, + {file = "cryptography-41.0.2.tar.gz", hash = "sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c"}, +] [package.dependencies] cffi = ">=1.12" [package.extras] docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] -docstest = ["pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"] -pep8test = ["black", "ruff", "mypy", "check-manifest"] -sdist = ["setuptools-rust (>=0.11.4)"] +docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] +nox = ["nox"] +pep8test = ["black", "check-sdist", "mypy", "ruff"] +sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["pytest (>=6.2.0)", "pytest-shard (>=0.1.2)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pretend", "iso8601"] +test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] -tox = ["tox"] [[package]] name = "fabric" version = "2.7.1" description = "High level SSH command execution" -category = "main" optional = false python-versions = "*" +files = [ + {file = "fabric-2.7.1-py2.py3-none-any.whl", hash = "sha256:7610362318ef2d391cc65d4befb684393975d889ed5720f23499394ec0e136fa"}, + {file = "fabric-2.7.1.tar.gz", hash = "sha256:76f8fef59cf2061dbd849bbce4fe49bdd820884385004b0ca59136ac3db129e4"}, +] [package.dependencies] invoke = ">=1.3,<2.0" @@ -117,74 +256,132 @@ testing = ["mock (>=2.0.0,<3.0)"] name = "invoke" version = "1.7.3" description = "Pythonic task execution" -category = "main" optional = false python-versions = "*" +files = [ + {file = "invoke-1.7.3-py3-none-any.whl", hash = "sha256:d9694a865764dd3fd91f25f7e9a97fb41666e822bbb00e670091e3f43933574d"}, + {file = "invoke-1.7.3.tar.gz", hash = "sha256:41b428342d466a82135d5ab37119685a989713742be46e42a3a399d685579314"}, +] [[package]] name = "isort" -version = "5.10.1" +version = "5.12.0" description = "A Python utility / library to sort Python imports." 
-category = "dev" optional = false -python-versions = ">=3.6.1,<4.0" +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.12.0-py3-none-any.whl", hash = "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"}, + {file = "isort-5.12.0.tar.gz", hash = "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504"}, +] [package.extras] -pipfile_deprecated_finder = ["pipreqs", "requirementslib"] -requirements_deprecated_finder = ["pipreqs", "pip-api"] -colors = ["colorama (>=0.4.3,<0.5.0)"] +colors = ["colorama (>=0.4.3)"] +pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"] plugins = ["setuptools"] +requirements-deprecated-finder = ["pip-api", "pipreqs"] [[package]] name = "jsonpatch" -version = "1.32" +version = "1.33" description = "Apply JSON-Patches (RFC 6902)" -category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +files = [ + {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, + {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, +] [package.dependencies] jsonpointer = ">=1.9" [[package]] name = "jsonpointer" -version = "2.3" +version = "2.4" description = "Identify specific nodes in a JSON document (RFC 6901)" -category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +files = [ + {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, + {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, +] [[package]] name = "jsonschema" -version = "4.17.0" +version = "4.18.4" description = "An implementation of JSON Schema validation for Python" -category = "main" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.18.4-py3-none-any.whl", hash = "sha256:971be834317c22daaa9132340a51c01b50910724082c2c1a2ac87eeec153a3fe"}, + {file = "jsonschema-4.18.4.tar.gz", hash = "sha256:fb3642735399fa958c0d2aad7057901554596c63349f4f6b283c493cf692a25d"}, +] [package.dependencies] -attrs = ">=17.4.0" -pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" [package.extras] format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] +[[package]] +name = "jsonschema-specifications" +version = "2023.7.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.7.1-py3-none-any.whl", hash = "sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1"}, + {file = "jsonschema_specifications-2023.7.1.tar.gz", hash = "sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb"}, +] + +[package.dependencies] +referencing = ">=0.28.0" + [[package]] 
name = "mccabe" version = "0.7.0" description = "McCabe checker, plugin for flake8" -category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] [[package]] name = "mypy" version = "0.961" description = "Optional static typing for Python" -category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "mypy-0.961-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:697540876638ce349b01b6786bc6094ccdaba88af446a9abb967293ce6eaa2b0"}, + {file = "mypy-0.961-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b117650592e1782819829605a193360a08aa99f1fc23d1d71e1a75a142dc7e15"}, + {file = "mypy-0.961-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bdd5ca340beffb8c44cb9dc26697628d1b88c6bddf5c2f6eb308c46f269bb6f3"}, + {file = "mypy-0.961-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3e09f1f983a71d0672bbc97ae33ee3709d10c779beb613febc36805a6e28bb4e"}, + {file = "mypy-0.961-cp310-cp310-win_amd64.whl", hash = "sha256:e999229b9f3198c0c880d5e269f9f8129c8862451ce53a011326cad38b9ccd24"}, + {file = "mypy-0.961-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b24be97351084b11582fef18d79004b3e4db572219deee0212078f7cf6352723"}, + {file = "mypy-0.961-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f4a21d01fc0ba4e31d82f0fff195682e29f9401a8bdb7173891070eb260aeb3b"}, + {file = "mypy-0.961-cp36-cp36m-win_amd64.whl", hash = "sha256:439c726a3b3da7ca84a0199a8ab444cd8896d95012c4a6c4a0d808e3147abf5d"}, + {file = "mypy-0.961-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5a0b53747f713f490affdceef835d8f0cb7285187a6a44c33821b6d1f46ed813"}, + {file = "mypy-0.961-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0e9f70df36405c25cc530a86eeda1e0867863d9471fe76d1273c783df3d35c2e"}, + {file = "mypy-0.961-cp37-cp37m-win_amd64.whl", hash = "sha256:b88f784e9e35dcaa075519096dc947a388319cb86811b6af621e3523980f1c8a"}, + {file = "mypy-0.961-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d5aaf1edaa7692490f72bdb9fbd941fbf2e201713523bdb3f4038be0af8846c6"}, + {file = "mypy-0.961-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9f5f5a74085d9a81a1f9c78081d60a0040c3efb3f28e5c9912b900adf59a16e6"}, + {file = "mypy-0.961-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f4b794db44168a4fc886e3450201365c9526a522c46ba089b55e1f11c163750d"}, + {file = "mypy-0.961-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:64759a273d590040a592e0f4186539858c948302c653c2eac840c7a3cd29e51b"}, + {file = "mypy-0.961-cp38-cp38-win_amd64.whl", hash = "sha256:63e85a03770ebf403291ec50097954cc5caf2a9205c888ce3a61bd3f82e17569"}, + {file = "mypy-0.961-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5f1332964963d4832a94bebc10f13d3279be3ce8f6c64da563d6ee6e2eeda932"}, + {file = "mypy-0.961-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:006be38474216b833eca29ff6b73e143386f352e10e9c2fbe76aa8549e5554f5"}, + {file = "mypy-0.961-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9940e6916ed9371809b35b2154baf1f684acba935cd09928952310fbddaba648"}, + {file = 
"mypy-0.961-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a5ea0875a049de1b63b972456542f04643daf320d27dc592d7c3d9cd5d9bf950"}, + {file = "mypy-0.961-cp39-cp39-win_amd64.whl", hash = "sha256:1ece702f29270ec6af25db8cf6185c04c02311c6bb21a69f423d40e527b75c56"}, + {file = "mypy-0.961-py3-none-any.whl", hash = "sha256:03c6cc893e7563e7b2949b969e63f02c000b32502a1b4d1314cabe391aa87d66"}, + {file = "mypy-0.961.tar.gz", hash = "sha256:f730d56cb924d371c26b8eaddeea3cc07d78ff51c521c6d04899ac6904b75492"}, +] [package.dependencies] mypy-extensions = ">=0.4.3" @@ -198,19 +395,25 @@ reports = ["lxml"] [[package]] name = "mypy-extensions" -version = "0.4.3" -description = "Experimental type system extensions for programs checked with the mypy typechecker." -category = "dev" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." optional = false -python-versions = "*" +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] [[package]] name = "paramiko" -version = "3.1.0" +version = "3.2.0" description = "SSH2 protocol library" -category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "paramiko-3.2.0-py3-none-any.whl", hash = "sha256:df0f9dd8903bc50f2e10580af687f3015bf592a377cd438d2ec9546467a14eb8"}, + {file = "paramiko-3.2.0.tar.gz", hash = "sha256:93cdce625a8a1dc12204439d45033f3261bdb2c201648cfcdc06f9fd0f94ec29"}, +] [package.dependencies] bcrypt = ">=3.2" @@ -218,86 +421,110 @@ cryptography = ">=3.3" pynacl = ">=1.5" [package.extras] -all = ["pyasn1 (>=0.1.7)", "invoke (>=2.0)", "gssapi (>=1.4.1)", "pywin32 (>=2.1.8)"] -gssapi = ["pyasn1 (>=0.1.7)", "gssapi (>=1.4.1)", "pywin32 (>=2.1.8)"] +all = ["gssapi (>=1.4.1)", "invoke (>=2.0)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] +gssapi = ["gssapi (>=1.4.1)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] invoke = ["invoke (>=2.0)"] [[package]] name = "pathlib2" version = "2.3.7.post1" description = "Object-oriented filesystem paths" -category = "main" optional = false python-versions = "*" +files = [ + {file = "pathlib2-2.3.7.post1-py2.py3-none-any.whl", hash = "sha256:5266a0fd000452f1b3467d782f079a4343c63aaa119221fbdc4e39577489ca5b"}, + {file = "pathlib2-2.3.7.post1.tar.gz", hash = "sha256:9fe0edad898b83c0c3e199c842b27ed216645d2e177757b2dd67384d4113c641"}, +] [package.dependencies] six = "*" [[package]] name = "pathspec" -version = "0.10.1" +version = "0.11.1" description = "Utility library for gitignore style pattern matching of file paths." -category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, + {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, +] [[package]] name = "platformdirs" -version = "2.5.2" -description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "dev" +version = "3.9.1" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
optional = false python-versions = ">=3.7" +files = [ + {file = "platformdirs-3.9.1-py3-none-any.whl", hash = "sha256:ad8291ae0ae5072f66c16945166cb11c63394c7a3ad1b1bc9828ca3162da8c2f"}, + {file = "platformdirs-3.9.1.tar.gz", hash = "sha256:1b42b450ad933e981d56e59f1b97495428c9bd60698baab9f3eb3d00d5822421"}, +] [package.extras] -docs = ["furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)", "sphinx (>=4)"] -test = ["appdirs (==1.4.4)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)", "pytest (>=6)"] +docs = ["furo (>=2023.5.20)", "proselint (>=0.13)", "sphinx (>=7.0.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)"] [[package]] name = "pycodestyle" -version = "2.9.1" +version = "2.10.0" description = "Python style guide checker" -category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "pycodestyle-2.10.0-py2.py3-none-any.whl", hash = "sha256:8a4eaf0d0495c7395bdab3589ac2db602797d76207242c17d470186815706610"}, + {file = "pycodestyle-2.10.0.tar.gz", hash = "sha256:347187bdb476329d98f695c213d7295a846d1152ff4fe9bacb8a9590b8ee7053"}, +] [[package]] name = "pycparser" version = "2.21" description = "C parser in Python" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] [[package]] name = "pydocstyle" -version = "6.1.1" +version = "6.3.0" description = "Python docstring style checker" -category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "pydocstyle-6.3.0-py3-none-any.whl", hash = "sha256:118762d452a49d6b05e194ef344a55822987a462831ade91ec5c06fd2169d019"}, + {file = "pydocstyle-6.3.0.tar.gz", hash = "sha256:7ce43f0c0ac87b07494eb9c0b462c0b73e6ff276807f204d6b53edc72b7e44e1"}, +] [package.dependencies] -snowballstemmer = "*" +snowballstemmer = ">=2.2.0" [package.extras] -toml = ["toml"] +toml = ["tomli (>=1.2.3)"] [[package]] name = "pyflakes" version = "2.5.0" description = "passive checker of Python programs" -category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "pyflakes-2.5.0-py2.py3-none-any.whl", hash = "sha256:4579f67d887f804e67edb544428f264b7b24f435b263c4614f384135cea553d2"}, + {file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"}, +] [[package]] name = "pylama" version = "8.4.1" description = "Code audit tool for python" -category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "pylama-8.4.1-py3-none-any.whl", hash = "sha256:5bbdbf5b620aba7206d688ed9fc917ecd3d73e15ec1a89647037a09fa3a86e60"}, + {file = "pylama-8.4.1.tar.gz", hash = "sha256:2d4f7aecfb5b7466216d48610c7d6bad1c3990c29cdd392ad08259b161e486f6"}, +] [package.dependencies] mccabe = ">=0.7.0" @@ -306,12 +533,12 @@ pydocstyle = ">=6.1.1" pyflakes = ">=2.5.0" [package.extras] -all = ["pylint", "eradicate", "radon", "mypy", "vulture"] +all = ["eradicate", "mypy", "pylint", "radon", "vulture"] eradicate = ["eradicate"] mypy = ["mypy"] pylint = ["pylint"] radon = ["radon"] -tests = ["pytest (>=7.1.2)", "pytest-mypy", "eradicate (>=2.0.0)", "radon (>=5.1.0)", "mypy", "pylint (>=2.11.1)", "pylama-quotes", "toml", "vulture", 
"types-setuptools", "types-toml"] +tests = ["eradicate (>=2.0.0)", "mypy", "pylama-quotes", "pylint (>=2.11.1)", "pytest (>=7.1.2)", "pytest-mypy", "radon (>=5.1.0)", "toml", "types-setuptools", "types-toml", "vulture"] toml = ["toml (>=0.10.2)"] vulture = ["vulture"] @@ -319,145 +546,295 @@ vulture = ["vulture"] name = "pynacl" version = "1.5.0" description = "Python binding to the Networking and Cryptography (NaCl) library" -category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"}, + {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"}, + {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"}, + {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"}, +] [package.dependencies] cffi = ">=1.4.1" [package.extras] docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] -tests = ["pytest (>=3.2.1,!=3.3.0)", "hypothesis (>=3.27.0)"] - -[[package]] -name = "pyrsistent" -version = "0.19.1" -description = "Persistent/Functional/Immutable data structures" -category = "main" -optional = false -python-versions = ">=3.7" +tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyyaml" -version = "6.0" +version = "6.0.1" description = "YAML parser and emitter for Python" -category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = 
"sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "referencing" +version = "0.30.0" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.30.0-py3-none-any.whl", hash = "sha256:c257b08a399b6c2f5a3510a50d28ab5dbc7bbde049bcaf954d43c446f83ab548"}, + {file = "referencing-0.30.0.tar.gz", hash = "sha256:47237742e990457f7512c7d27486394a9aadaf876cbfaa4be65b27b4f4d47c6b"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + +[[package]] +name = "rpds-py" +version = "0.9.2" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.9.2-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:ab6919a09c055c9b092798ce18c6c4adf49d24d4d9e43a92b257e3f2548231e7"}, + {file = "rpds_py-0.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d55777a80f78dd09410bd84ff8c95ee05519f41113b2df90a69622f5540c4f8b"}, + {file = "rpds_py-0.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a216b26e5af0a8e265d4efd65d3bcec5fba6b26909014effe20cd302fd1138fa"}, + {file = "rpds_py-0.9.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:29cd8bfb2d716366a035913ced99188a79b623a3512292963d84d3e06e63b496"}, + {file = "rpds_py-0.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44659b1f326214950a8204a248ca6199535e73a694be8d3e0e869f820767f12f"}, + {file = "rpds_py-0.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:745f5a43fdd7d6d25a53ab1a99979e7f8ea419dfefebcab0a5a1e9095490ee5e"}, + {file = "rpds_py-0.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a987578ac5214f18b99d1f2a3851cba5b09f4a689818a106c23dbad0dfeb760f"}, + {file = "rpds_py-0.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:bf4151acb541b6e895354f6ff9ac06995ad9e4175cbc6d30aaed08856558201f"}, + {file = "rpds_py-0.9.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:03421628f0dc10a4119d714a17f646e2837126a25ac7a256bdf7c3943400f67f"}, + {file = "rpds_py-0.9.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:13b602dc3e8dff3063734f02dcf05111e887f301fdda74151a93dbbc249930fe"}, + {file = "rpds_py-0.9.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fae5cb554b604b3f9e2c608241b5d8d303e410d7dfb6d397c335f983495ce7f6"}, + {file = "rpds_py-0.9.2-cp310-none-win32.whl", hash = "sha256:47c5f58a8e0c2c920cc7783113df2fc4ff12bf3a411d985012f145e9242a2764"}, + {file = "rpds_py-0.9.2-cp310-none-win_amd64.whl", hash = "sha256:4ea6b73c22d8182dff91155af018b11aac9ff7eca085750455c5990cb1cfae6e"}, + {file = "rpds_py-0.9.2-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:e564d2238512c5ef5e9d79338ab77f1cbbda6c2d541ad41b2af445fb200385e3"}, + {file = "rpds_py-0.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f411330a6376fb50e5b7a3e66894e4a39e60ca2e17dce258d53768fea06a37bd"}, + {file = "rpds_py-0.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e7521f5af0233e89939ad626b15278c71b69dc1dfccaa7b97bd4cdf96536bb7"}, + {file = "rpds_py-0.9.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8d3335c03100a073883857e91db9f2e0ef8a1cf42dc0369cbb9151c149dbbc1b"}, + {file = "rpds_py-0.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d25b1c1096ef0447355f7293fbe9ad740f7c47ae032c2884113f8e87660d8f6e"}, + {file = "rpds_py-0.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a5d3fbd02efd9cf6a8ffc2f17b53a33542f6b154e88dd7b42ef4a4c0700fdad"}, + {file = "rpds_py-0.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5934e2833afeaf36bd1eadb57256239785f5af0220ed8d21c2896ec4d3a765f"}, + {file = "rpds_py-0.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:095b460e117685867d45548fbd8598a8d9999227e9061ee7f012d9d264e6048d"}, + {file = "rpds_py-0.9.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:91378d9f4151adc223d584489591dbb79f78814c0734a7c3bfa9c9e09978121c"}, + {file = "rpds_py-0.9.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:24a81c177379300220e907e9b864107614b144f6c2a15ed5c3450e19cf536fae"}, + {file = "rpds_py-0.9.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:de0b6eceb46141984671802d412568d22c6bacc9b230174f9e55fc72ef4f57de"}, + {file = "rpds_py-0.9.2-cp311-none-win32.whl", hash = "sha256:700375326ed641f3d9d32060a91513ad668bcb7e2cffb18415c399acb25de2ab"}, + {file = "rpds_py-0.9.2-cp311-none-win_amd64.whl", hash = "sha256:0766babfcf941db8607bdaf82569ec38107dbb03c7f0b72604a0b346b6eb3298"}, + {file = "rpds_py-0.9.2-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:b1440c291db3f98a914e1afd9d6541e8fc60b4c3aab1a9008d03da4651e67386"}, + {file = "rpds_py-0.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0f2996fbac8e0b77fd67102becb9229986396e051f33dbceada3debaacc7033f"}, + {file = "rpds_py-0.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f30d205755566a25f2ae0382944fcae2f350500ae4df4e795efa9e850821d82"}, + {file = "rpds_py-0.9.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:159fba751a1e6b1c69244e23ba6c28f879a8758a3e992ed056d86d74a194a0f3"}, + {file = "rpds_py-0.9.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:a1f044792e1adcea82468a72310c66a7f08728d72a244730d14880cd1dabe36b"}, + {file = "rpds_py-0.9.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9251eb8aa82e6cf88510530b29eef4fac825a2b709baf5b94a6094894f252387"}, + {file = "rpds_py-0.9.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01899794b654e616c8625b194ddd1e5b51ef5b60ed61baa7a2d9c2ad7b2a4238"}, + {file = "rpds_py-0.9.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0c43f8ae8f6be1d605b0465671124aa8d6a0e40f1fb81dcea28b7e3d87ca1e1"}, + {file = "rpds_py-0.9.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:207f57c402d1f8712618f737356e4b6f35253b6d20a324d9a47cb9f38ee43a6b"}, + {file = "rpds_py-0.9.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b52e7c5ae35b00566d244ffefba0f46bb6bec749a50412acf42b1c3f402e2c90"}, + {file = "rpds_py-0.9.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:978fa96dbb005d599ec4fd9ed301b1cc45f1a8f7982d4793faf20b404b56677d"}, + {file = "rpds_py-0.9.2-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:6aa8326a4a608e1c28da191edd7c924dff445251b94653988efb059b16577a4d"}, + {file = "rpds_py-0.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:aad51239bee6bff6823bbbdc8ad85136c6125542bbc609e035ab98ca1e32a192"}, + {file = "rpds_py-0.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4bd4dc3602370679c2dfb818d9c97b1137d4dd412230cfecd3c66a1bf388a196"}, + {file = "rpds_py-0.9.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dd9da77c6ec1f258387957b754f0df60766ac23ed698b61941ba9acccd3284d1"}, + {file = "rpds_py-0.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:190ca6f55042ea4649ed19c9093a9be9d63cd8a97880106747d7147f88a49d18"}, + {file = "rpds_py-0.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:876bf9ed62323bc7dcfc261dbc5572c996ef26fe6406b0ff985cbcf460fc8a4c"}, + {file = "rpds_py-0.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa2818759aba55df50592ecbc95ebcdc99917fa7b55cc6796235b04193eb3c55"}, + {file = "rpds_py-0.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9ea4d00850ef1e917815e59b078ecb338f6a8efda23369677c54a5825dbebb55"}, + {file = "rpds_py-0.9.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:5855c85eb8b8a968a74dc7fb014c9166a05e7e7a8377fb91d78512900aadd13d"}, + {file = "rpds_py-0.9.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:14c408e9d1a80dcb45c05a5149e5961aadb912fff42ca1dd9b68c0044904eb32"}, + {file = "rpds_py-0.9.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:65a0583c43d9f22cb2130c7b110e695fff834fd5e832a776a107197e59a1898e"}, + {file = "rpds_py-0.9.2-cp38-none-win32.whl", hash = "sha256:71f2f7715935a61fa3e4ae91d91b67e571aeb5cb5d10331ab681256bda2ad920"}, + {file = "rpds_py-0.9.2-cp38-none-win_amd64.whl", hash = "sha256:674c704605092e3ebbbd13687b09c9f78c362a4bc710343efe37a91457123044"}, + {file = "rpds_py-0.9.2-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:07e2c54bef6838fa44c48dfbc8234e8e2466d851124b551fc4e07a1cfeb37260"}, + {file = "rpds_py-0.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f7fdf55283ad38c33e35e2855565361f4bf0abd02470b8ab28d499c663bc5d7c"}, + {file = "rpds_py-0.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:890ba852c16ace6ed9f90e8670f2c1c178d96510a21b06d2fa12d8783a905193"}, + {file = "rpds_py-0.9.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:50025635ba8b629a86d9d5474e650da304cb46bbb4d18690532dd79341467846"}, + {file = "rpds_py-0.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:517cbf6e67ae3623c5127206489d69eb2bdb27239a3c3cc559350ef52a3bbf0b"}, + {file = "rpds_py-0.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0836d71ca19071090d524739420a61580f3f894618d10b666cf3d9a1688355b1"}, + {file = "rpds_py-0.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c439fd54b2b9053717cca3de9583be6584b384d88d045f97d409f0ca867d80f"}, + {file = "rpds_py-0.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f68996a3b3dc9335037f82754f9cdbe3a95db42bde571d8c3be26cc6245f2324"}, + {file = "rpds_py-0.9.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7d68dc8acded354c972116f59b5eb2e5864432948e098c19fe6994926d8e15c3"}, + {file = "rpds_py-0.9.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f963c6b1218b96db85fc37a9f0851eaf8b9040aa46dec112611697a7023da535"}, + {file = "rpds_py-0.9.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5a46859d7f947061b4010e554ccd1791467d1b1759f2dc2ec9055fa239f1bc26"}, + {file = "rpds_py-0.9.2-cp39-none-win32.whl", hash = "sha256:e07e5dbf8a83c66783a9fe2d4566968ea8c161199680e8ad38d53e075df5f0d0"}, + {file = "rpds_py-0.9.2-cp39-none-win_amd64.whl", hash = "sha256:682726178138ea45a0766907957b60f3a1bf3acdf212436be9733f28b6c5af3c"}, + {file = "rpds_py-0.9.2-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:196cb208825a8b9c8fc360dc0f87993b8b260038615230242bf18ec84447c08d"}, + {file = "rpds_py-0.9.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:c7671d45530fcb6d5e22fd40c97e1e1e01965fc298cbda523bb640f3d923b387"}, + {file = "rpds_py-0.9.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83b32f0940adec65099f3b1c215ef7f1d025d13ff947975a055989cb7fd019a4"}, + {file = "rpds_py-0.9.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f67da97f5b9eac838b6980fc6da268622e91f8960e083a34533ca710bec8611"}, + {file = "rpds_py-0.9.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:03975db5f103997904c37e804e5f340c8fdabbb5883f26ee50a255d664eed58c"}, + {file = "rpds_py-0.9.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:987b06d1cdb28f88a42e4fb8a87f094e43f3c435ed8e486533aea0bf2e53d931"}, + {file = "rpds_py-0.9.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c861a7e4aef15ff91233751619ce3a3d2b9e5877e0fcd76f9ea4f6847183aa16"}, + {file = "rpds_py-0.9.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:02938432352359805b6da099c9c95c8a0547fe4b274ce8f1a91677401bb9a45f"}, + {file = "rpds_py-0.9.2-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:ef1f08f2a924837e112cba2953e15aacfccbbfcd773b4b9b4723f8f2ddded08e"}, + {file = "rpds_py-0.9.2-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:35da5cc5cb37c04c4ee03128ad59b8c3941a1e5cd398d78c37f716f32a9b7f67"}, + {file = "rpds_py-0.9.2-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:141acb9d4ccc04e704e5992d35472f78c35af047fa0cfae2923835d153f091be"}, + {file = "rpds_py-0.9.2-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:79f594919d2c1a0cc17d1988a6adaf9a2f000d2e1048f71f298b056b1018e872"}, + {file = "rpds_py-0.9.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:a06418fe1155e72e16dddc68bb3780ae44cebb2912fbd8bb6ff9161de56e1798"}, + {file 
= "rpds_py-0.9.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b2eb034c94b0b96d5eddb290b7b5198460e2d5d0c421751713953a9c4e47d10"}, + {file = "rpds_py-0.9.2-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b08605d248b974eb02f40bdcd1a35d3924c83a2a5e8f5d0fa5af852c4d960af"}, + {file = "rpds_py-0.9.2-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a0805911caedfe2736935250be5008b261f10a729a303f676d3d5fea6900c96a"}, + {file = "rpds_py-0.9.2-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab2299e3f92aa5417d5e16bb45bb4586171c1327568f638e8453c9f8d9e0f020"}, + {file = "rpds_py-0.9.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c8d7594e38cf98d8a7df25b440f684b510cf4627fe038c297a87496d10a174f"}, + {file = "rpds_py-0.9.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8b9ec12ad5f0a4625db34db7e0005be2632c1013b253a4a60e8302ad4d462afd"}, + {file = "rpds_py-0.9.2-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1fcdee18fea97238ed17ab6478c66b2095e4ae7177e35fb71fbe561a27adf620"}, + {file = "rpds_py-0.9.2-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:933a7d5cd4b84f959aedeb84f2030f0a01d63ae6cf256629af3081cf3e3426e8"}, + {file = "rpds_py-0.9.2-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:686ba516e02db6d6f8c279d1641f7067ebb5dc58b1d0536c4aaebb7bf01cdc5d"}, + {file = "rpds_py-0.9.2-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:0173c0444bec0a3d7d848eaeca2d8bd32a1b43f3d3fde6617aac3731fa4be05f"}, + {file = "rpds_py-0.9.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:d576c3ef8c7b2d560e301eb33891d1944d965a4d7a2eacb6332eee8a71827db6"}, + {file = "rpds_py-0.9.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed89861ee8c8c47d6beb742a602f912b1bb64f598b1e2f3d758948721d44d468"}, + {file = "rpds_py-0.9.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1054a08e818f8e18910f1bee731583fe8f899b0a0a5044c6e680ceea34f93876"}, + {file = "rpds_py-0.9.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99e7c4bb27ff1aab90dcc3e9d37ee5af0231ed98d99cb6f5250de28889a3d502"}, + {file = "rpds_py-0.9.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c545d9d14d47be716495076b659db179206e3fd997769bc01e2d550eeb685596"}, + {file = "rpds_py-0.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9039a11bca3c41be5a58282ed81ae422fa680409022b996032a43badef2a3752"}, + {file = "rpds_py-0.9.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fb39aca7a64ad0c9490adfa719dbeeb87d13be137ca189d2564e596f8ba32c07"}, + {file = "rpds_py-0.9.2-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:2d8b3b3a2ce0eaa00c5bbbb60b6713e94e7e0becab7b3db6c5c77f979e8ed1f1"}, + {file = "rpds_py-0.9.2-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:99b1c16f732b3a9971406fbfe18468592c5a3529585a45a35adbc1389a529a03"}, + {file = "rpds_py-0.9.2-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c27ee01a6c3223025f4badd533bea5e87c988cb0ba2811b690395dfe16088cfe"}, + {file = "rpds_py-0.9.2.tar.gz", hash = "sha256:8d70e8f14900f2657c249ea4def963bed86a29b81f81f5b76b5a9215680de945"}, +] [[package]] name = "scapy" version = "2.5.0" description = "Scapy: interactive packet manipulation tool" -category = "main" optional = false python-versions 
= ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4" +files = [ + {file = "scapy-2.5.0.tar.gz", hash = "sha256:5b260c2b754fd8d409ba83ee7aee294ecdbb2c235f9f78fe90bc11cb6e5debc2"}, +] [package.extras] basic = ["ipython"] -complete = ["ipython", "pyx", "cryptography (>=2.0)", "matplotlib"] +complete = ["cryptography (>=2.0)", "ipython", "matplotlib", "pyx"] docs = ["sphinx (>=3.0.0)", "sphinx_rtd_theme (>=0.4.3)", "tox (>=3.0.0)"] [[package]] name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] [[package]] name = "snowballstemmer" version = "2.2.0" description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." -category = "dev" optional = false python-versions = "*" +files = [ + {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, + {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, +] [[package]] name = "toml" version = "0.10.2" description = "Python Library for Tom's Obvious, Minimal Language" -category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] [[package]] name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] [[package]] name = "types-pyyaml" -version = "6.0.12.1" +version = "6.0.12.11" description = "Typing stubs for PyYAML" -category = "main" optional = false python-versions = "*" +files = [ + {file = "types-PyYAML-6.0.12.11.tar.gz", hash = "sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b"}, + {file = "types_PyYAML-6.0.12.11-py3-none-any.whl", hash = "sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d"}, +] [[package]] name = "typing-extensions" -version = "4.4.0" +version = "4.7.1" description = "Backported and Experimental Type Hints for Python 3.7+" -category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, + {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, +] [[package]] name = "warlock" version = "2.0.1" description = "Python object model built on JSON schema and JSON patch." 
-category = "main" optional = false python-versions = ">=3.7,<4.0" +files = [ + {file = "warlock-2.0.1-py3-none-any.whl", hash = "sha256:448df959cec31904f686ac8c6b1dfab80f0cdabce3d303be517dd433eeebf012"}, + {file = "warlock-2.0.1.tar.gz", hash = "sha256:99abbf9525b2a77f2cde896d3a9f18a5b4590db063db65e08207694d2e0137fc"}, +] [package.dependencies] jsonpatch = ">=1,<2" jsonschema = ">=4,<5" [metadata] -lock-version = "1.1" +lock-version = "2.0" python-versions = "^3.10" -content-hash = "907bf4ae92b05bbdb7cf2f37fc63e530702f1fff9990afa1f8e6c369b97ba592" - -[metadata.files] -attrs = [] -bcrypt = [] -black = [] -cffi = [] -click = [] -colorama = [] -cryptography = [] -fabric = [] -invoke = [] -isort = [] -jsonpatch = [] -jsonpointer = [] -jsonschema = [] -mccabe = [] -mypy = [] -mypy-extensions = [] -paramiko = [] -pathlib2 = [] -pathspec = [] -platformdirs = [] -pycodestyle = [] -pycparser = [] -pydocstyle = [] -pyflakes = [] -pylama = [] -pynacl = [] -pyrsistent = [] -pyyaml = [] -scapy = [] -six = [] -snowballstemmer = [] -toml = [] -tomli = [] -types-pyyaml = [] -typing-extensions = [] -warlock = [] +content-hash = "0b1e4a1cb8323e17e5ee5951c97e74bde6e60d0413d7b25b1803d5b2bab39639" diff --git a/dts/pyproject.toml b/dts/pyproject.toml index bd7591f7fb5..6762edfa6b2 100644 --- a/dts/pyproject.toml +++ b/dts/pyproject.toml @@ -1,11 +1,22 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2022 University of New Hampshire +# Copyright(c) 2023 PANTHEON.tech s.r.o. [tool.poetry] name = "dts" version = "0.1.0" -description = "" -authors = ["Owen Hilyard ", "dts@dpdk.org"] +description = "DPDK Test Suite." +license = "BSD-3-Clause" +authors = [ + "Owen Hilyard ", + "Juraj Linkeš ", + "Jeremy Spewock " +] +maintainers = [ + "Lijuan Tu ", + "Juraj Linkeš " +] +documentation = "https://doc.dpdk.org/guides/tools/dts.html" [tool.poetry.dependencies] python = "^3.10" @@ -15,28 +26,28 @@ types-PyYAML = "^6.0.8" fabric = "^2.7.1" scapy = "^2.5.0" -[tool.poetry.dev-dependencies] +[tool.poetry.group.dev.dependencies] mypy = "^0.961" black = "^22.6.0" isort = "^5.10.1" pylama = "^8.4.1" -pyflakes = "2.5.0" +pyflakes = "^2.5.0" toml = "^0.10.2" -[tool.poetry.scripts] -dts = "main:main" - [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.pylama] -linters = "pep8,pylint,mccabe,mypy,pycodestyle,pyflakes" +linters = "mccabe,pycodestyle,pyflakes" format = "pylint" max_line_length = 88 # https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#line-length [tool.mypy] python_version = "3.10" +enable_error_code = ["ignore-without-code"] +show_error_codes = true +warn_unused_ignores = true [tool.isort] profile = "black" diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c index d3f66bb03d5..ef9cd2918fc 100644 --- a/examples/bbdev_app/main.c +++ b/examples/bbdev_app/main.c @@ -241,7 +241,7 @@ bbdev_parse_args(int argc, char **argv, return -1; } app_params->num_enc_cores = - __builtin_popcount(app_params->enc_core_mask); + rte_popcount32(app_params->enc_core_mask); break; case 'd': @@ -252,7 +252,7 @@ bbdev_parse_args(int argc, char **argv, return -1; } app_params->num_dec_cores = - __builtin_popcount(app_params->dec_core_mask); + rte_popcount32(app_params->dec_core_mask); break; case 'p': diff --git a/examples/bond/main.c b/examples/bond/main.c index 9b076bb39fa..90f422ec112 100644 --- a/examples/bond/main.c +++ b/examples/bond/main.c @@ -105,8 +105,8 @@ ":%02"PRIx8":%02"PRIx8":%02"PRIx8, \ RTE_ETHER_ADDR_BYTES(&addr)) -uint16_t 
slaves[RTE_MAX_ETHPORTS]; -uint16_t slaves_count; +uint16_t members[RTE_MAX_ETHPORTS]; +uint16_t members_count; static uint16_t BOND_PORT = 0xffff; @@ -128,7 +128,7 @@ static struct rte_eth_conf port_conf = { }; static void -slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool) +member_port_init(uint16_t portid, struct rte_mempool *mbuf_pool) { int retval; uint16_t nb_rxd = RTE_RX_DESC_DEFAULT; @@ -252,10 +252,10 @@ bond_port_init(struct rte_mempool *mbuf_pool) rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc " "failed (res=%d)\n", BOND_PORT, retval); - for (i = 0; i < slaves_count; i++) { - if (rte_eth_bond_slave_add(BOND_PORT, slaves[i]) == -1) - rte_exit(-1, "Oooops! adding slave (%u) to bond (%u) failed!\n", - slaves[i], BOND_PORT); + for (i = 0; i < members_count; i++) { + if (rte_eth_bond_member_add(BOND_PORT, members[i]) == -1) + rte_exit(-1, "Oooops! adding member (%u) to bond (%u) failed!\n", + members[i], BOND_PORT); } @@ -283,18 +283,18 @@ bond_port_init(struct rte_mempool *mbuf_pool) if (retval < 0) rte_exit(retval, "Start port %d failed (res=%d)", BOND_PORT, retval); - printf("Waiting for slaves to become active..."); + printf("Waiting for members to become active..."); while (wait_counter) { - uint16_t act_slaves[16] = {0}; - if (rte_eth_bond_active_slaves_get(BOND_PORT, act_slaves, 16) == - slaves_count) { + uint16_t act_members[16] = {0}; + if (rte_eth_bond_active_members_get(BOND_PORT, act_members, 16) == + members_count) { printf("\n"); break; } sleep(1); printf("..."); if (--wait_counter == 0) - rte_exit(-1, "\nFailed to activate slaves\n"); + rte_exit(-1, "\nFailed to activate members\n"); } retval = rte_eth_promiscuous_enable(BOND_PORT); @@ -631,7 +631,7 @@ static void cmd_help_parsed(__rte_unused void *parsed_result, "send IP - sends one ARPrequest through bonding for IP.\n" "start - starts listening ARPs.\n" "stop - stops lcore_main.\n" - "show - shows some bond info: ex. active slaves etc.\n" + "show - shows some bond info: ex. 
active members etc.\n" "help - prints help.\n" "quit - terminate all threads and quit.\n" ); @@ -742,13 +742,13 @@ static void cmd_show_parsed(__rte_unused void *parsed_result, struct cmdline *cl, __rte_unused void *data) { - uint16_t slaves[16] = {0}; + uint16_t members[16] = {0}; uint8_t len = 16; struct rte_ether_addr addr; uint16_t i; int ret; - for (i = 0; i < slaves_count; i++) { + for (i = 0; i < members_count; i++) { ret = rte_eth_macaddr_get(i, &addr); if (ret != 0) { cmdline_printf(cl, @@ -763,9 +763,9 @@ static void cmd_show_parsed(__rte_unused void *parsed_result, rte_spinlock_lock(&global_flag_stru_p->lock); cmdline_printf(cl, - "Active_slaves:%d " + "Active_members:%d " "packets received:Tot:%d Arp:%d IPv4:%d\n", - rte_eth_bond_active_slaves_get(BOND_PORT, slaves, len), + rte_eth_bond_active_members_get(BOND_PORT, members, len), global_flag_stru_p->port_packets[0], global_flag_stru_p->port_packets[1], global_flag_stru_p->port_packets[2]); @@ -836,10 +836,10 @@ main(int argc, char *argv[]) rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n"); /* initialize all ports */ - slaves_count = nb_ports; + members_count = nb_ports; RTE_ETH_FOREACH_DEV(i) { - slave_port_init(i, mbuf_pool); - slaves[i] = i; + member_port_init(i, mbuf_pool); + members[i] = i; } bond_port_init(mbuf_pool); diff --git a/examples/eventdev_pipeline/main.c b/examples/eventdev_pipeline/main.c index 8d6c90f15d2..0c995d1a70e 100644 --- a/examples/eventdev_pipeline/main.c +++ b/examples/eventdev_pipeline/main.c @@ -230,17 +230,17 @@ parse_app_args(int argc, char **argv) break; case 'r': rx_lcore_mask = parse_coremask(optarg); - popcnt = __builtin_popcountll(rx_lcore_mask); + popcnt = rte_popcount64(rx_lcore_mask); fdata->rx_single = (popcnt == 1); break; case 't': tx_lcore_mask = parse_coremask(optarg); - popcnt = __builtin_popcountll(tx_lcore_mask); + popcnt = rte_popcount64(tx_lcore_mask); fdata->tx_single = (popcnt == 1); break; case 'e': sched_lcore_mask = parse_coremask(optarg); - popcnt = __builtin_popcountll(sched_lcore_mask); + popcnt = rte_popcount64(sched_lcore_mask); fdata->sched_single = (popcnt == 1); break; case 'm': diff --git a/examples/fips_validation/main.c b/examples/fips_validation/main.c index 6518c959c48..7ae2c6c0071 100644 --- a/examples/fips_validation/main.c +++ b/examples/fips_validation/main.c @@ -1006,8 +1006,6 @@ prepare_ecdsa_op(void) asym->ecdsa.op_type = RTE_CRYPTO_ASYM_OP_SIGN; asym->ecdsa.message.data = msg.val; asym->ecdsa.message.length = msg.len; - asym->ecdsa.pkey.data = vec.ecdsa.pkey.val; - asym->ecdsa.pkey.length = vec.ecdsa.pkey.len; asym->ecdsa.k.data = vec.ecdsa.k.val; asym->ecdsa.k.length = vec.ecdsa.k.len; @@ -1029,10 +1027,6 @@ prepare_ecdsa_op(void) asym->ecdsa.op_type = RTE_CRYPTO_ASYM_OP_VERIFY; asym->ecdsa.message.data = msg.val; asym->ecdsa.message.length = msg.len; - asym->ecdsa.q.x.data = vec.ecdsa.qx.val; - asym->ecdsa.q.x.length = vec.ecdsa.qx.len; - asym->ecdsa.q.y.data = vec.ecdsa.qy.val; - asym->ecdsa.q.y.length = vec.ecdsa.qy.len; asym->ecdsa.r.data = vec.ecdsa.r.val; asym->ecdsa.r.length = vec.ecdsa.r.len; asym->ecdsa.s.data = vec.ecdsa.s.val; @@ -1570,6 +1564,9 @@ prepare_ecdsa_xform(struct rte_crypto_asym_xform *xform) info.device_name, RTE_CRYPTO_ASYM_OP_SIGN); return -EPERM; } + + xform->ec.pkey.data = vec.ecdsa.pkey.val; + xform->ec.pkey.length = vec.ecdsa.pkey.len; break; case FIPS_TEST_ASYM_SIGVER: if (!rte_cryptodev_asym_xform_capability_check_optype(cap, @@ -1578,6 +1575,11 @@ prepare_ecdsa_xform(struct rte_crypto_asym_xform *xform) 
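
For illustration (not part of the patch): the hunks above replace the GCC __builtin_popcount()/__builtin_popcountll() calls with the rte_bitops helpers. A minimal sketch of the replacement functions, assuming only that rte_bitops.h is on the include path; the mask values are hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <rte_bitops.h>

int
main(void)
{
    uint32_t core_mask = 0xf0;    /* hypothetical 4-core mask */
    uint64_t lcore_mask = 0xff00; /* hypothetical 8-core mask */

    /* drop-in equivalents of __builtin_popcount()/__builtin_popcountll() */
    printf("cores in 32-bit mask: %u\n", rte_popcount32(core_mask));
    printf("cores in 64-bit mask: %u\n", rte_popcount64(lcore_mask));
    return 0;
}

Unlike the compiler builtins, the rte_ helpers keep the call sites portable across the toolchains DPDK supports.
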
info.device_name, RTE_CRYPTO_ASYM_OP_VERIFY); return -EPERM; } + + xform->ec.q.x.data = vec.ecdsa.qx.val; + xform->ec.q.x.length = vec.ecdsa.qx.len; + xform->ec.q.y.data = vec.ecdsa.qy.val; + xform->ec.q.y.length = vec.ecdsa.qy.len; break; default: break; @@ -2034,7 +2036,7 @@ fips_mct_tdes_test(void) } for (k = 0; k < 24; k++) - val_key.val[k] = (__builtin_popcount(val_key.val[k]) & + val_key.val[k] = (rte_popcount32(val_key.val[k]) & 0x1) ? val_key.val[k] : (val_key.val[k] ^ 0x1); diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 72b3bfba9eb..bf98d2618b5 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -568,7 +568,7 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx, static inline void process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts, - uint8_t nb_pkts, uint16_t portid, struct rte_security_ctx *ctx) + uint8_t nb_pkts, uint16_t portid, void *ctx) { struct ipsec_traffic traffic; @@ -1555,6 +1555,8 @@ add_cdev_mapping(const struct rte_cryptodev_info *dev_info, uint16_t cdev_id, struct lcore_conf *qconf; struct ipsec_ctx *ipsec_ctx; const char *str; + void *sec_ctx; + const struct rte_security_capability *sec_cap; qconf = &lcore_conf[params->lcore_id]; @@ -1569,8 +1571,8 @@ add_cdev_mapping(const struct rte_cryptodev_info *dev_info, uint16_t cdev_id, } /* Required cryptodevs with operation chaining */ - if (!(dev_info->feature_flags & - RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING)) + if (!(dev_info->feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING) && + !(dev_info->feature_flags & RTE_CRYPTODEV_FF_SECURITY)) return ret; for (i = dev_info->capabilities; @@ -1600,6 +1602,41 @@ add_cdev_mapping(const struct rte_cryptodev_info *dev_info, uint16_t cdev_id, } } + sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id); + if (sec_ctx == NULL) + return ret; + + sec_cap = rte_security_capabilities_get(sec_ctx); + if (sec_cap == NULL) + return ret; + + for (i = sec_cap->crypto_capabilities; + i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) { + if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + continue; + + if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) { + ret |= add_mapping(str, cdev_id, qp, params, + ipsec_ctx, NULL, NULL, i); + continue; + } + + if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) + continue; + + for (j = sec_cap->crypto_capabilities; + j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) { + if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + continue; + + if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) + continue; + + ret |= add_mapping(str, cdev_id, qp, params, + ipsec_ctx, i, j, NULL); + } + } + return ret; } diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c index a5706bed246..f5cec4a9283 100644 --- a/examples/ipsec-secgw/ipsec.c +++ b/examples/ipsec-secgw/ipsec.c @@ -205,7 +205,7 @@ verify_ipsec_capabilities(struct rte_security_ipsec_xform *ipsec_xform, static inline int -verify_security_capabilities(struct rte_security_ctx *ctx, +verify_security_capabilities(void *ctx, struct rte_security_session_conf *sess_conf, uint32_t *ol_flags) { @@ -327,9 +327,7 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[], }; if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) { - struct rte_security_ctx *ctx = (struct rte_security_ctx *) - rte_cryptodev_get_sec_ctx( - cdev_id); + void *ctx = rte_cryptodev_get_sec_ctx(cdev_id); /* Set IPsec parameters in conf */ set_ipsec_conf(sa, &(sess_conf.ipsec)); @@ -411,7 +409,7 @@ create_inline_session(struct socket_ctx *skt_ctx, 
struct ipsec_sa *sa, struct rte_ipsec_session *ips) { int32_t ret = 0; - struct rte_security_ctx *sec_ctx; + void *sec_ctx; struct rte_security_session_conf sess_conf = { .action_type = ips->type, .protocol = RTE_SECURITY_PROTOCOL_IPSEC, @@ -490,9 +488,7 @@ create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa, struct rte_flow_error err; int ret = 0; - sec_ctx = (struct rte_security_ctx *) - rte_eth_dev_get_sec_ctx( - sa->portid); + sec_ctx = rte_eth_dev_get_sec_ctx(sa->portid); if (sec_ctx == NULL) { RTE_LOG(ERR, IPSEC, " rte_eth_dev_get_sec_ctx failed\n"); @@ -657,8 +653,7 @@ create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa, return -1; } } else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) { - sec_ctx = (struct rte_security_ctx *) - rte_eth_dev_get_sec_ctx(sa->portid); + sec_ctx = rte_eth_dev_get_sec_ctx(sa->portid); if (sec_ctx == NULL) { RTE_LOG(ERR, IPSEC, diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h index 6bef2a7285f..5059418456e 100644 --- a/examples/ipsec-secgw/ipsec.h +++ b/examples/ipsec-secgw/ipsec.h @@ -279,7 +279,7 @@ struct cnt_blk { struct lcore_rx_queue { uint16_t port_id; uint8_t queue_id; - struct rte_security_ctx *sec_ctx; + void *sec_ctx; } __rte_cache_aligned; struct buffer { diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c index 58c80c73f02..8d122e8519f 100644 --- a/examples/ipsec-secgw/ipsec_worker.c +++ b/examples/ipsec-secgw/ipsec_worker.c @@ -20,7 +20,7 @@ struct port_drv_mode_data { void *sess; - struct rte_security_ctx *ctx; + void *ctx; }; typedef void (*ipsec_worker_fn_t)(void); diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h index cf59b9b5ab4..ac980b8bcf8 100644 --- a/examples/ipsec-secgw/ipsec_worker.h +++ b/examples/ipsec-secgw/ipsec_worker.h @@ -119,7 +119,7 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph, } static __rte_always_inline void -prepare_one_packet(struct rte_security_ctx *ctx, struct rte_mbuf *pkt, +prepare_one_packet(void *ctx, struct rte_mbuf *pkt, struct ipsec_traffic *t) { uint32_t ptype = pkt->packet_type; @@ -230,7 +230,7 @@ prepare_one_packet(struct rte_security_ctx *ctx, struct rte_mbuf *pkt, } static __rte_always_inline void -prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts, +prepare_traffic(void *ctx, struct rte_mbuf **pkts, struct ipsec_traffic *t, uint16_t nb_pkts) { int32_t i; diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c index 3f01cbd9e27..5fbc16bb2ad 100644 --- a/examples/l3fwd-power/main.c +++ b/examples/l3fwd-power/main.c @@ -46,7 +46,7 @@ #include #include #include -#include +#include #include "perf_core.h" #include "main.h" @@ -2245,6 +2245,7 @@ init_power_library(void) env = rte_power_get_env(); if (env != PM_ENV_ACPI_CPUFREQ && env != PM_ENV_PSTATE_CPUFREQ && + env != PM_ENV_AMD_PSTATE_CPUFREQ && env != PM_ENV_CPPC_CPUFREQ) { RTE_LOG(ERR, POWER, "Only ACPI, PSTATE and CPPC mode are supported\n"); @@ -2417,6 +2418,8 @@ autodetect_mode(void) return APP_MODE_LEGACY; if (rte_power_check_env_supported(PM_ENV_PSTATE_CPUFREQ)) return APP_MODE_LEGACY; + if (rte_power_check_env_supported(PM_ENV_AMD_PSTATE_CPUFREQ)) + return APP_MODE_LEGACY; if (rte_power_check_env_supported(PM_ENV_CPPC_CPUFREQ)) return APP_MODE_LEGACY; diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c index 476ac0c54ff..40e102b38a7 100644 --- a/examples/l3fwd/l3fwd_em.c +++ b/examples/l3fwd/l3fwd_em.c @@ -663,6 +663,7 @@ 
em_main_loop(__rte_unused void *dummy) return 0; } +#ifdef RTE_LIB_EVENTDEV static __rte_always_inline void em_event_loop_single(struct l3fwd_event_resources *evt_rsrc, const uint8_t flags) @@ -959,6 +960,7 @@ em_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy) em_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ); return 0; } +#endif /* Initialize exact match (hash) parameters. 8< */ void diff --git a/examples/l3fwd/l3fwd_em_hlm.h b/examples/l3fwd/l3fwd_em_hlm.h index 2e11eefad7c..31cda9ddc1c 100644 --- a/examples/l3fwd/l3fwd_em_hlm.h +++ b/examples/l3fwd/l3fwd_em_hlm.h @@ -255,6 +255,7 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid, send_packets_multi(qconf, pkts_burst, dst_port, nb_rx); } +#ifdef RTE_LIB_EVENTDEV /* * Buffer optimized handling of events, invoked * from main_loop. @@ -347,5 +348,6 @@ l3fwd_em_process_event_vector(struct rte_event_vector *vec, process_event_vector(vec, dst_port); } +#endif /* RTE_LIB_EVENTDEV */ #endif /* __L3FWD_EM_HLM_H__ */ diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c index 32906ab08dc..20be22c6dbe 100644 --- a/examples/l3fwd/l3fwd_event.c +++ b/examples/l3fwd/l3fwd_event.c @@ -2,6 +2,7 @@ * Copyright(C) 2019 Marvell International Ltd. */ +#ifdef RTE_LIB_EVENTDEV #include #include @@ -341,3 +342,4 @@ l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id, rte_event_port_quiesce(event_d_id, event_p_id, l3fwd_event_port_flush, NULL); } +#endif /* RTE_LIB_EVENTDEV */ diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h index e21817c36be..9aad358003e 100644 --- a/examples/l3fwd/l3fwd_event.h +++ b/examples/l3fwd/l3fwd_event.h @@ -6,11 +6,13 @@ #define __L3FWD_EVENTDEV_H__ #include +#include +#include + +#ifdef RTE_LIB_EVENTDEV #include #include #include -#include -#include #include "l3fwd.h" @@ -164,4 +166,5 @@ void l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id, struct rte_event events[], uint16_t nb_enq, uint16_t nb_deq, uint8_t is_vector); +#endif /* RTE_LIB_EVENTDEV */ #endif /* __L3FWD_EVENTDEV_H__ */ diff --git a/examples/l3fwd/l3fwd_event_generic.c b/examples/l3fwd/l3fwd_event_generic.c index c80573fc582..ddb6e5c38d3 100644 --- a/examples/l3fwd/l3fwd_event_generic.c +++ b/examples/l3fwd/l3fwd_event_generic.c @@ -2,6 +2,7 @@ * Copyright(C) 2019 Marvell International Ltd. */ +#ifdef RTE_LIB_EVENTDEV #include #include "l3fwd.h" @@ -309,3 +310,4 @@ l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops) ops->event_port_setup = l3fwd_event_port_setup_generic; ops->adapter_setup = l3fwd_rx_tx_adapter_setup_generic; } +#endif /* RTE_LIB_EVENTDEV */ diff --git a/examples/l3fwd/l3fwd_event_internal_port.c b/examples/l3fwd/l3fwd_event_internal_port.c index 32cf6571480..cb49a8b9fae 100644 --- a/examples/l3fwd/l3fwd_event_internal_port.c +++ b/examples/l3fwd/l3fwd_event_internal_port.c @@ -2,6 +2,7 @@ * Copyright(C) 2019 Marvell International Ltd. 
*/ +#ifdef RTE_LIB_EVENTDEV #include #include "l3fwd.h" @@ -311,3 +312,4 @@ l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops) ops->event_port_setup = l3fwd_event_port_setup_internal_port; ops->adapter_setup = l3fwd_rx_tx_adapter_setup_internal_port; } +#endif /* RTE_LIB_EVENTDEV */ diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c index 18398492aeb..6a219844158 100644 --- a/examples/l3fwd/l3fwd_fib.c +++ b/examples/l3fwd/l3fwd_fib.c @@ -253,6 +253,7 @@ fib_main_loop(__rte_unused void *dummy) return 0; } +#ifdef RTE_LIB_EVENTDEV /* One eventdev loop for single and burst using fib. */ static __rte_always_inline void fib_event_loop(struct l3fwd_event_resources *evt_rsrc, @@ -635,6 +636,7 @@ fib_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy) fib_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ); return 0; } +#endif /* Function to setup fib. 8< */ void diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c index 4ac1925c843..a484a33089d 100644 --- a/examples/l3fwd/l3fwd_lpm.c +++ b/examples/l3fwd/l3fwd_lpm.c @@ -226,6 +226,7 @@ lpm_main_loop(__rte_unused void *dummy) return 0; } +#ifdef RTE_LIB_EVENTDEV static __rte_always_inline uint16_t lpm_process_event_pkt(const struct lcore_conf *lconf, struct rte_mbuf *mbuf) { @@ -554,6 +555,7 @@ lpm_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy) lpm_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ); return 0; } +#endif void setup_lpm(const int socketid) diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c index a4f061537e3..6063eb13990 100644 --- a/examples/l3fwd/main.c +++ b/examples/l3fwd/main.c @@ -135,8 +135,10 @@ static struct rte_eth_conf port_conf = { uint32_t max_pkt_len; -static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS]; +#ifdef RTE_LIB_EVENTDEV static struct rte_mempool *vector_pool[RTE_MAX_ETHPORTS]; +#endif +static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS]; static uint8_t lkp_per_socket[NB_SOCKETS]; struct l3fwd_lkp_mode { @@ -398,8 +400,10 @@ print_usage(const char *prgname) " [--parse-ptype]" " [--per-port-pool]" " [--mode]" +#ifdef RTE_LIB_EVENTDEV " [--eventq-sched]" " [--event-vector [--event-vector-size SIZE] [--event-vector-tmo NS]]" +#endif " [-E]" " [-L]\n\n" @@ -422,6 +426,7 @@ print_usage(const char *prgname) " --per-port-pool: Use separate buffer pool per port\n" " --mode: Packet transfer mode for I/O, poll or eventdev\n" " Default mode = poll\n" +#ifdef RTE_LIB_EVENTDEV " --eventq-sched: Event queue synchronization method\n" " ordered, atomic or parallel.\n" " Default: atomic\n" @@ -432,6 +437,7 @@ print_usage(const char *prgname) " --event-vector: Enable event vectorization.\n" " --event-vector-size: Max vector size if event vectorization is enabled.\n" " --event-vector-tmo: Max timeout to form vector in nanoseconds if event vectorization is enabled\n" +#endif " -E : Enable exact match, legacy flag please use --lookup=em instead\n" " -L : Enable longest prefix match, legacy flag please use --lookup=lpm instead\n" " --rule_ipv4=FILE: Specify the ipv4 rules entries file.\n" @@ -559,14 +565,16 @@ parse_eth_dest(const char *optarg) } static void -parse_mode(const char *optarg) +parse_mode(const char *optarg __rte_unused) { +#ifdef RTE_LIB_EVENTDEV struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc(); if (!strcmp(optarg, "poll")) evt_rsrc->enabled = false; else if (!strcmp(optarg, "eventdev")) evt_rsrc->enabled = true; +#endif } static void @@ -601,6 +609,7 @@ parse_queue_size(const char 
*queue_size_arg, uint16_t *queue_size, int rx) *queue_size = value; } +#ifdef RTE_LIB_EVENTDEV static void parse_eventq_sched(const char *optarg) { @@ -631,6 +640,7 @@ parse_event_eth_rx_queues(const char *eth_rx_queues) evt_rsrc->eth_rx_queues = num_eth_rx_queues; } +#endif static int parse_lookup(const char *optarg) @@ -756,9 +766,11 @@ parse_args(int argc, char **argv) int option_index; char *prgname = argv[0]; uint8_t lcore_params = 0; +#ifdef RTE_LIB_EVENTDEV uint8_t eventq_sched = 0; uint8_t eth_rx_q = 0; struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc(); +#endif argvopt = argv; @@ -850,6 +862,7 @@ parse_args(int argc, char **argv) parse_mode(optarg); break; +#ifdef RTE_LIB_EVENTDEV case CMD_LINE_OPT_EVENTQ_SYNC_NUM: parse_eventq_sched(optarg); eventq_sched = 1; @@ -860,6 +873,20 @@ parse_args(int argc, char **argv) eth_rx_q = 1; break; + case CMD_LINE_OPT_ENABLE_VECTOR_NUM: + printf("event vectorization is enabled\n"); + evt_rsrc->vector_enabled = 1; + break; + + case CMD_LINE_OPT_VECTOR_SIZE_NUM: + evt_rsrc->vector_size = strtol(optarg, NULL, 10); + break; + + case CMD_LINE_OPT_VECTOR_TMO_NS_NUM: + evt_rsrc->vector_tmo_ns = strtoull(optarg, NULL, 10); + break; +#endif + case CMD_LINE_OPT_LOOKUP_NUM: if (lookup_mode != L3FWD_LOOKUP_DEFAULT) { fprintf(stderr, "Only one lookup mode is allowed at a time!\n"); @@ -875,16 +902,6 @@ parse_args(int argc, char **argv) return -1; break; - case CMD_LINE_OPT_ENABLE_VECTOR_NUM: - printf("event vectorization is enabled\n"); - evt_rsrc->vector_enabled = 1; - break; - case CMD_LINE_OPT_VECTOR_SIZE_NUM: - evt_rsrc->vector_size = strtol(optarg, NULL, 10); - break; - case CMD_LINE_OPT_VECTOR_TMO_NS_NUM: - evt_rsrc->vector_tmo_ns = strtoull(optarg, NULL, 10); - break; case CMD_LINE_OPT_RULE_IPV4_NUM: l3fwd_set_rule_ipv4_name(optarg); break; @@ -900,6 +917,8 @@ parse_args(int argc, char **argv) } } + RTE_SET_USED(lcore_params); /* needed if no eventdev block */ +#ifdef RTE_LIB_EVENTDEV if (evt_rsrc->enabled && lcore_params) { fprintf(stderr, "lcore config is not valid when event mode is selected\n"); return -1; @@ -927,6 +946,7 @@ parse_args(int argc, char **argv) "vector timeout set to default (%" PRIu64 " ns)\n", evt_rsrc->vector_tmo_ns); } +#endif /* * Nothing is selected, pick longest-prefix match @@ -962,7 +982,9 @@ print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr) int init_mem(uint16_t portid, unsigned int nb_mbuf) { +#ifdef RTE_LIB_EVENTDEV struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc(); +#endif struct lcore_conf *qconf; int socketid; unsigned lcore_id; @@ -1007,6 +1029,7 @@ init_mem(uint16_t portid, unsigned int nb_mbuf) } } +#ifdef RTE_LIB_EVENTDEV if (evt_rsrc->vector_enabled && vector_pool[portid] == NULL) { unsigned int nb_vec; @@ -1025,6 +1048,7 @@ init_mem(uint16_t portid, unsigned int nb_mbuf) printf("Allocated vector pool for port %d\n", portid); } +#endif qconf = &lcore_conf[lcore_id]; qconf->ipv4_lookup_struct = @@ -1406,6 +1430,7 @@ l3fwd_service_enable(uint32_t service_id) return 0; } +#ifdef RTE_LIB_EVENTDEV static void l3fwd_event_service_setup(void) { @@ -1458,16 +1483,20 @@ l3fwd_event_service_setup(void) l3fwd_service_enable(service_id); } } +#endif int main(int argc, char **argv) { +#ifdef RTE_LIB_EVENTDEV struct l3fwd_event_resources *evt_rsrc; + int i; +#endif struct lcore_conf *qconf; uint16_t queueid, portid; unsigned int lcore_id; uint8_t queue; - int i, ret; + int ret; /* init EAL */ ret = rte_eal_init(argc, argv); @@ -1487,7 +1516,9 @@ main(int argc, char **argv) 
*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid]; } +#ifdef RTE_LIB_EVENTDEV evt_rsrc = l3fwd_get_eventdev_rsrc(); +#endif /* parse application arguments (after the EAL ones) */ ret = parse_args(argc, argv); if (ret < 0) @@ -1499,6 +1530,7 @@ main(int argc, char **argv) /* Add the config file rules */ l3fwd_lkp.read_config_files(); +#ifdef RTE_LIB_EVENTDEV evt_rsrc->per_port_pool = per_port_pool; evt_rsrc->pkt_pool = pktmbuf_pool; evt_rsrc->vec_pool = vector_pool; @@ -1514,6 +1546,7 @@ main(int argc, char **argv) l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop; l3fwd_event_service_setup(); } else +#endif l3fwd_poll_resource_setup(); /* start ports */ @@ -1562,6 +1595,8 @@ main(int argc, char **argv) ret = 0; /* launch per-lcore init on every lcore */ rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MAIN); + +#ifdef RTE_LIB_EVENTDEV if (evt_rsrc->enabled) { for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) rte_event_eth_rx_adapter_stop( @@ -1589,7 +1624,9 @@ main(int argc, char **argv) rte_event_dev_stop(evt_rsrc->event_d_id); rte_event_dev_close(evt_rsrc->event_d_id); - } else { + } else +#endif + { rte_eal_mp_wait_lcore(); RTE_ETH_FOREACH_DEV(portid) { diff --git a/examples/l3fwd/meson.build b/examples/l3fwd/meson.build index b40244a9416..c25de77bba7 100644 --- a/examples/l3fwd/meson.build +++ b/examples/l3fwd/meson.build @@ -7,7 +7,7 @@ # DPDK instance, use 'make' allow_experimental_apis = true -deps += ['acl', 'hash', 'lpm', 'fib', 'eventdev'] +deps += ['acl', 'hash', 'lpm', 'fib'] sources = files( 'l3fwd_acl.c', 'l3fwd_em.c', @@ -18,3 +18,6 @@ sources = files( 'l3fwd_lpm.c', 'main.c', ) +if dpdk_conf.has('RTE_LIB_EVENTDEV') + deps += 'eventdev' +endif diff --git a/examples/vhost/main.c b/examples/vhost/main.c index bfe466ff77a..ce5c1efddf5 100644 --- a/examples/vhost/main.c +++ b/examples/vhost/main.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "main.h" @@ -1807,7 +1808,7 @@ static const struct rte_vhost_device_ops virtio_net_device_ops = * This is a thread will wake up after a period to print stats if the user has * enabled them. */ -static void * +static uint32_t print_stats(__rte_unused void *arg) { struct vhost_dev *vdev; @@ -1852,7 +1853,7 @@ print_stats(__rte_unused void *arg) fflush(stdout); } - return NULL; + return 0; } static void @@ -1907,7 +1908,7 @@ main(int argc, char *argv[]) unsigned nb_ports, valid_num_ports; int ret, i; uint16_t portid; - static pthread_t tid; + rte_thread_t tid; uint64_t flags = RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS; signal(SIGINT, sigint_handler); @@ -1986,11 +1987,11 @@ main(int argc, char *argv[]) /* Enable stats if the user option is set. */ if (enable_stats) { - ret = rte_ctrl_thread_create(&tid, "print-stats", NULL, + ret = rte_thread_create_control(&tid, "dpdk-vhost-stat", print_stats, NULL); if (ret < 0) rte_exit(EXIT_FAILURE, - "Cannot create print-stats thread\n"); + "Cannot create dpdk-vhost-stat thread\n"); } /* Launch all data cores. 
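
For illustration (not part of the patch): with eventdev now optional in l3fwd (see the RTE_LIB_EVENTDEV guards and the conditional meson dependency above), an application can use the same guard pattern. A minimal sketch, assuming rte_config.h defines RTE_LIB_EVENTDEV only when the library was built; the function name is hypothetical.

#include <stdio.h>
#include <rte_config.h>
#ifdef RTE_LIB_EVENTDEV
#include <rte_eventdev.h>
#endif

static void
report_io_mode(void)
{
#ifdef RTE_LIB_EVENTDEV
    /* eventdev compiled in: report how many event devices are present */
    printf("event devices: %u\n", (unsigned int)rte_event_dev_count());
#else
    /* eventdev not built: poll-mode I/O is the only option */
    printf("eventdev not available, poll mode only\n");
#endif
}
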
*/ diff --git a/examples/vhost_blk/vhost_blk.c b/examples/vhost_blk/vhost_blk.c index 3709d7ed06b..376f7b89a7a 100644 --- a/examples/vhost_blk/vhost_blk.c +++ b/examples/vhost_blk/vhost_blk.c @@ -5,8 +5,6 @@ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif -#include -#include #include #include @@ -529,12 +527,10 @@ process_vq(struct vhost_blk_queue *vq) } } -static void * +static uint32_t ctrlr_worker(void *arg) { struct vhost_blk_ctrlr *ctrlr = (struct vhost_blk_ctrlr *)arg; - cpu_set_t cpuset; - pthread_t thread; int i; fprintf(stdout, "Ctrlr Worker Thread start\n"); @@ -546,11 +542,6 @@ ctrlr_worker(void *arg) exit(0); } - thread = pthread_self(); - CPU_ZERO(&cpuset); - CPU_SET(0, &cpuset); - pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset); - for (i = 0; i < NUM_OF_BLK_QUEUES; i++) submit_inflight_vq(&ctrlr->queues[i]); @@ -560,7 +551,7 @@ ctrlr_worker(void *arg) fprintf(stdout, "Ctrlr Worker Thread Exiting\n"); sem_post(&exit_sem); - return NULL; + return 0; } static int @@ -605,7 +596,7 @@ new_device(int vid) struct vhost_blk_queue *vq; char path[PATH_MAX]; uint64_t features, protocol_features; - pthread_t tid; + rte_thread_t tid; int i, ret; bool packed_ring, inflight_shmfd; @@ -686,15 +677,15 @@ new_device(int vid) /* start polling vring */ worker_thread_status = WORKER_STATE_START; fprintf(stdout, "New Device %s, Device ID %d\n", path, vid); - if (rte_ctrl_thread_create(&tid, "vhostblk-ctrlr", NULL, - &ctrlr_worker, ctrlr) != 0) { + if (rte_thread_create_control(&tid, "dpdk-vhost-blk", + &ctrlr_worker, ctrlr) != 0) { fprintf(stderr, "Worker Thread Started Failed\n"); return -1; } /* device has been started */ ctrlr->started = 1; - pthread_detach(tid); + rte_thread_detach(tid); return 0; } diff --git a/kernel/linux/meson.build b/kernel/linux/meson.build deleted file mode 100644 index 8d47074621f..00000000000 --- a/kernel/linux/meson.build +++ /dev/null @@ -1,103 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2018 Intel Corporation - -subdirs = [] - -kernel_build_dir = get_option('kernel_dir') -kernel_source_dir = get_option('kernel_dir') -kernel_install_dir = '' -install = not meson.is_cross_build() -cross_args = [] - -if not meson.is_cross_build() - # native build - kernel_version = run_command('uname', '-r', check: true).stdout().strip() - if kernel_source_dir != '' - # Try kernel release from sources first - r = run_command('make', '-s', '-C', kernel_source_dir, 'kernelrelease', check: false) - if r.returncode() == 0 - kernel_version = r.stdout().strip() - endif - else - # use default path for native builds - kernel_source_dir = '/lib/modules/' + kernel_version + '/source' - endif - kernel_install_dir = '/lib/modules/' + kernel_version + '/extra/dpdk' - if kernel_build_dir == '' - # use default path for native builds - kernel_build_dir = '/lib/modules/' + kernel_version + '/build' - endif - - # test running make in kernel directory, using "make kernelversion" - make_returncode = run_command('make', '-sC', kernel_build_dir, - 'kernelversion', check: true).returncode() - if make_returncode != 0 - # backward compatibility: - # the headers could still be in the 'build' subdir - if not kernel_build_dir.endswith('build') and not kernel_build_dir.endswith('build/') - kernel_build_dir = join_paths(kernel_build_dir, 'build') - make_returncode = run_command('make', '-sC', kernel_build_dir, - 'kernelversion', check: true).returncode() - endif - endif - - if make_returncode != 0 - error('Cannot compile kernel modules as requested - are kernel headers installed?') - 
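
For illustration (not part of the patch): the vhost examples above move from rte_ctrl_thread_create() to rte_thread_create_control(), whose thread routine now returns uint32_t instead of void *. A minimal sketch of the new pattern, assuming a short thread name (long names are truncated by the OS); the worker body is hypothetical.

#include <stdint.h>
#include <rte_thread.h>

static uint32_t
stats_worker(void *arg)
{
    (void)arg;
    /* periodic work would go here */
    return 0;
}

static int
start_stats_worker(void)
{
    rte_thread_t tid;

    /* control threads run outside the data-path lcores */
    return rte_thread_create_control(&tid, "dpdk-stats", stats_worker, NULL);
}
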
endif - - # DO ACTUAL MODULE BUILDING - foreach d:subdirs - subdir(d) - endforeach - - subdir_done() -endif - -# cross build -# if we are cross-compiling we need kernel_build_dir specified -if kernel_build_dir == '' - error('Need "kernel_dir" option for kmod compilation when cross-compiling') -endif -cross_compiler = find_program('c').path() -if cross_compiler.endswith('gcc') - cross_prefix = run_command([py3, '-c', 'print("' + cross_compiler + '"[:-3])'], - check: true).stdout().strip() -elif cross_compiler.endswith('clang') - cross_prefix = '' - found_target = false - # search for '-target' and use the arg that follows - # (i.e. the value of '-target') as cross_prefix - foreach cross_c_arg : meson.get_cross_property('c_args') - if found_target and cross_prefix == '' - cross_prefix = cross_c_arg - endif - if cross_c_arg == '-target' - found_target = true - endif - endforeach - if cross_prefix == '' - error('Did not find -target and its value in c_args in input cross-file.') - endif - linker = 'lld' - foreach cross_c_link_arg : meson.get_cross_property('c_link_args') - if cross_c_link_arg.startswith('-fuse-ld') - linker = cross_c_link_arg.split('=')[1] - endif - endforeach - cross_args += ['CC=@0@'.format(cross_compiler), 'LD=ld.@0@'.format(linker)] -else - error('Unsupported cross compiler: @0@'.format(cross_compiler)) -endif - -cross_arch = host_machine.cpu_family() -if host_machine.cpu_family() == 'aarch64' - cross_arch = 'arm64' -endif - -cross_args += ['ARCH=@0@'.format(cross_arch), - 'CROSS_COMPILE=@0@'.format(cross_prefix)] - -# DO ACTUAL MODULE BUILDING -foreach d:subdirs - subdir(d) -endforeach diff --git a/kernel/meson.build b/kernel/meson.build index b247e2df42f..417735b010a 100644 --- a/kernel/meson.build +++ b/kernel/meson.build @@ -1,4 +1,6 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation -subdir(exec_env) +if is_freebsd + subdir(exec_env) +endif diff --git a/lib/acl/acl_bld.c b/lib/acl/acl_bld.c index 2816632803b..418751e9f4c 100644 --- a/lib/acl/acl_bld.c +++ b/lib/acl/acl_bld.c @@ -1091,7 +1091,7 @@ acl_calc_wildness(struct rte_acl_build_rule *head, switch (rule->config->defs[n].type) { case RTE_ACL_FIELD_TYPE_BITMASK: - wild = (size - __builtin_popcountll( + wild = (size - rte_popcount64( fld->mask_range.u64 & msk_val)) / size; break; diff --git a/lib/acl/acl_run_avx512.c b/lib/acl/acl_run_avx512.c index 3b8795561ba..30b8214ab5e 100644 --- a/lib/acl/acl_run_avx512.c +++ b/lib/acl/acl_run_avx512.c @@ -45,13 +45,13 @@ update_flow_mask(const struct acl_flow_avx512 *flow, uint32_t *fmsk, fmsk[0] ^= rmsk[0]; m = rmsk[0]; - k = __builtin_popcount(m); + k = rte_popcount32(m); n = flow->total_packets - flow->num_packets; if (n < k) { /* reduce mask */ for (i = k - n; i != 0; i--) { - j = sizeof(m) * CHAR_BIT - 1 - __builtin_clz(m); + j = sizeof(m) * CHAR_BIT - 1 - rte_clz32(m); m ^= 1 << j; } } else diff --git a/lib/acl/acl_run_avx512_common.h b/lib/acl/acl_run_avx512_common.h index 578eaa1d0cf..67eb2af774e 100644 --- a/lib/acl/acl_run_avx512_common.h +++ b/lib/acl/acl_run_avx512_common.h @@ -192,7 +192,7 @@ _F_(start_flow)(struct acl_flow_avx512 *flow, uint32_t num, uint32_t msk, m[1] = msk >> _SIMD_PTR_NUM_; /* calculate masks for new flows */ - n = __builtin_popcount(m[0]); + n = rte_popcount32(m[0]); nm[0] = (1 << n) - 1; nm[1] = (1 << (num - n)) - 1; diff --git a/lib/acl/meson.build b/lib/acl/meson.build index fbe17f9454c..9cba08321a7 100644 --- a/lib/acl/meson.build +++ b/lib/acl/meson.build @@ -14,24 +14,11 @@ headers = files('rte_acl.h', 
'rte_acl_osdep.h') if dpdk_conf.has('RTE_ARCH_X86') sources += files('acl_run_sse.c') - # compile AVX2 version if either: - # a. we have AVX supported in minimum instruction set baseline - # b. it's not minimum instruction set, but supported by compiler - # - # in former case, just add avx2 C file to files list - # in latter case, compile c file to static lib, using correct compiler - # flags, and then have the .o file from static lib linked into main lib. - if cc.get_define('__AVX2__', args: machine_args) != '' - sources += files('acl_run_avx2.c') - cflags += '-DCC_AVX2_SUPPORT' - elif cc.has_argument('-mavx2') - avx2_tmplib = static_library('avx2_tmp', - 'acl_run_avx2.c', - dependencies: static_rte_eal, - c_args: cflags + ['-mavx2']) - objs += avx2_tmplib.extract_objects('acl_run_avx2.c') - cflags += '-DCC_AVX2_SUPPORT' - endif + avx2_tmplib = static_library('avx2_tmp', + 'acl_run_avx2.c', + dependencies: static_rte_eal, + c_args: cflags + ['-mavx2']) + objs += avx2_tmplib.extract_objects('acl_run_avx2.c') # compile AVX512 version if: # we are building 64-bit binary AND binutils can generate proper code diff --git a/lib/acl/rte_acl.c b/lib/acl/rte_acl.c index a61c3ba188d..4182006d1d1 100644 --- a/lib/acl/rte_acl.c +++ b/lib/acl/rte_acl.c @@ -42,10 +42,9 @@ rte_acl_classify_avx512x32(__rte_unused const struct rte_acl_ctx *ctx, } #endif -#ifndef CC_AVX2_SUPPORT +#ifndef RTE_ARCH_X86 /* - * If the compiler doesn't support AVX2 instructions, - * then the dummy one would be used instead for AVX2 classify method. + * If ISA doesn't have AVX2 or SSE, provide dummy fallbacks */ int rte_acl_classify_avx2(__rte_unused const struct rte_acl_ctx *ctx, @@ -56,9 +55,6 @@ rte_acl_classify_avx2(__rte_unused const struct rte_acl_ctx *ctx, { return -ENOTSUP; } -#endif - -#ifndef RTE_ARCH_X86 int rte_acl_classify_sse(__rte_unused const struct rte_acl_ctx *ctx, __rte_unused const uint8_t **data, @@ -182,7 +178,7 @@ acl_check_alg_x86(enum rte_acl_classify_alg alg) } if (alg == RTE_ACL_CLASSIFY_AVX2) { -#ifdef CC_AVX2_SUPPORT +#ifdef RTE_ARCH_X86 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) && rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) return 0; diff --git a/lib/bbdev/rte_bbdev.c b/lib/bbdev/rte_bbdev.c index 581509b5a21..155323e6ca9 100644 --- a/lib/bbdev/rte_bbdev.c +++ b/lib/bbdev/rte_bbdev.c @@ -24,7 +24,7 @@ #define DEV_NAME "BBDEV" /* Number of supported operation types in *rte_bbdev_op_type*. 
*/ -#define BBDEV_OP_TYPE_COUNT 6 +#define BBDEV_OP_TYPE_COUNT 7 /* BBDev library logging ID */ RTE_LOG_REGISTER_DEFAULT(bbdev_logtype, NOTICE); @@ -441,6 +441,7 @@ rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id, const struct rte_bbdev_op_cap *p; struct rte_bbdev_queue_conf *stored_conf; const char *op_type_str; + unsigned int max_priority; VALID_DEV_OR_RET_ERR(dev, dev_id); VALID_DEV_OPS_OR_RET_ERR(dev, dev_id); @@ -494,20 +495,16 @@ rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id, conf->queue_size, queue_id, dev_id); return -EINVAL; } - if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC && - conf->priority > dev_info.max_ul_queue_priority) { + if ((uint8_t)conf->op_type >= RTE_BBDEV_OP_TYPE_SIZE_MAX) { rte_bbdev_log(ERR, - "Priority (%u) of queue %u of bbdev %u must be <= %u", - conf->priority, queue_id, dev_id, - dev_info.max_ul_queue_priority); + "Invalid operation type (%u) ", conf->op_type); return -EINVAL; } - if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC && - conf->priority > dev_info.max_dl_queue_priority) { + max_priority = dev_info.queue_priority[conf->op_type]; + if (conf->priority > max_priority) { rte_bbdev_log(ERR, "Priority (%u) of queue %u of bbdev %u must be <= %u", - conf->priority, queue_id, dev_id, - dev_info.max_dl_queue_priority); + conf->priority, queue_id, dev_id, max_priority); return -EINVAL; } } @@ -857,6 +854,9 @@ get_bbdev_op_size(enum rte_bbdev_op_type type) case RTE_BBDEV_OP_FFT: result = sizeof(struct rte_bbdev_fft_op); break; + case RTE_BBDEV_OP_MLDTS: + result = sizeof(struct rte_bbdev_mldts_op); + break; default: break; } @@ -884,6 +884,10 @@ bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element, struct rte_bbdev_fft_op *op = element; memset(op, 0, mempool->elt_size); op->mempool = mempool; + } else if (type == RTE_BBDEV_OP_MLDTS) { + struct rte_bbdev_mldts_op *op = element; + memset(op, 0, mempool->elt_size); + op->mempool = mempool; } } @@ -1135,6 +1139,7 @@ rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type) "RTE_BBDEV_OP_LDPC_DEC", "RTE_BBDEV_OP_LDPC_ENC", "RTE_BBDEV_OP_FFT", + "RTE_BBDEV_OP_MLDTS", }; if (op_type < BBDEV_OP_TYPE_COUNT) diff --git a/lib/bbdev/rte_bbdev.h b/lib/bbdev/rte_bbdev.h index f124e1f5db2..d12e2e7fbcf 100644 --- a/lib/bbdev/rte_bbdev.h +++ b/lib/bbdev/rte_bbdev.h @@ -349,6 +349,8 @@ struct rte_bbdev_driver_info { const struct rte_bbdev_op_cap *capabilities; /** Device cpu_flag requirements */ const enum rte_cpu_flag_t *cpu_flag_reqs; + /** FFT windowing width for 2048 FFT - size defined in capability. */ + uint16_t *fft_window_width; }; /** Macro used at end of bbdev PMD list */ @@ -438,6 +440,12 @@ typedef uint16_t (*rte_bbdev_enqueue_fft_ops_t)( struct rte_bbdev_fft_op **ops, uint16_t num); +/** @internal Enqueue MLD-TS operations for processing on queue of a device. */ +typedef uint16_t (*rte_bbdev_enqueue_mldts_ops_t)( + struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_mldts_op **ops, + uint16_t num); + /** @internal Dequeue encode operations from a queue of a device. */ typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)( struct rte_bbdev_queue_data *q_data, @@ -453,6 +461,11 @@ typedef uint16_t (*rte_bbdev_dequeue_fft_ops_t)( struct rte_bbdev_queue_data *q_data, struct rte_bbdev_fft_op **ops, uint16_t num); +/** @internal Dequeue MLDTS operations from a queue of a device. 
*/ +typedef uint16_t (*rte_bbdev_dequeue_mldts_ops_t)( + struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_mldts_op **ops, uint16_t num); + #define RTE_BBDEV_NAME_MAX_LEN 64 /**< Max length of device name */ /** @@ -512,6 +525,10 @@ struct __rte_cache_aligned rte_bbdev { /** User application callback for interrupts if present */ struct rte_bbdev_cb_list list_cbs; struct rte_intr_handle *intr_handle; /**< Device interrupt handle */ + /** Enqueue MLD-TS function */ + rte_bbdev_enqueue_mldts_ops_t enqueue_mldts_ops; + /** Dequeue MLD-TS function */ + rte_bbdev_dequeue_mldts_ops_t dequeue_mldts_ops; }; /** @internal array of all devices */ @@ -668,6 +685,36 @@ rte_bbdev_enqueue_fft_ops(uint16_t dev_id, uint16_t queue_id, return dev->enqueue_fft_ops(q_data, ops, num_ops); } +/** + * Enqueue a burst of MLDTS operations to a queue of the device. + * This functions only enqueues as many operations as currently possible and + * does not block until @p num_ops entries in the queue are available. + * This function does not provide any error notification to avoid the + * corresponding overhead. + * + * @param dev_id + * The identifier of the device. + * @param queue_id + * The index of the queue. + * @param ops + * Pointer array containing operations to be enqueued Must have at least + * @p num_ops entries + * @param num_ops + * The maximum number of operations to enqueue. + * + * @return + * The number of operations actually enqueued (this is the number of processed + * entries in the @p ops array). + */ +static inline uint16_t +rte_bbdev_enqueue_mldts_ops(uint16_t dev_id, uint16_t queue_id, + struct rte_bbdev_mldts_op **ops, uint16_t num_ops) +{ + struct rte_bbdev *dev = &rte_bbdev_devices[dev_id]; + struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id]; + return dev->enqueue_mldts_ops(q_data, ops, num_ops); +} + /** * Dequeue a burst of processed encode operations from a queue of the device. * This functions returns only the current contents of the queue, @@ -823,6 +870,37 @@ rte_bbdev_dequeue_fft_ops(uint16_t dev_id, uint16_t queue_id, return dev->dequeue_fft_ops(q_data, ops, num_ops); } +/** + * Dequeue a burst of MLDTS operations from a queue of the device. + * This functions returns only the current contents of the queue, and does not + * block until @p num_ops is available. + * This function does not provide any error notification to avoid the + * corresponding overhead. + * + * @param dev_id + * The identifier of the device. + * @param queue_id + * The index of the queue. + * @param ops + * Pointer array where operations will be dequeued to. Must have at least + * @p num_ops entries + * @param num_ops + * The maximum number of operations to dequeue. + * + * @return + * The number of operations actually dequeued (this is the number of entries + * copied into the @p ops array). 
+ */ +__rte_experimental +static inline uint16_t +rte_bbdev_dequeue_mldts_ops(uint16_t dev_id, uint16_t queue_id, + struct rte_bbdev_mldts_op **ops, uint16_t num_ops) +{ + struct rte_bbdev *dev = &rte_bbdev_devices[dev_id]; + struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id]; + return dev->dequeue_mldts_ops(q_data, ops, num_ops); +} + /** Definitions of device event types */ enum rte_bbdev_event_type { RTE_BBDEV_EVENT_UNKNOWN, /**< unknown event type */ diff --git a/lib/bbdev/rte_bbdev_op.h b/lib/bbdev/rte_bbdev_op.h index 138fa19b58d..369ac331bf9 100644 --- a/lib/bbdev/rte_bbdev_op.h +++ b/lib/bbdev/rte_bbdev_op.h @@ -50,6 +50,11 @@ extern "C" { #define RTE_BBDEV_LDPC_MAX_CODE_BLOCKS (256) /* 12 CS maximum */ #define RTE_BBDEV_MAX_CS_2 (6) +#define RTE_BBDEV_MAX_CS (12) +/* MLD-TS up to 4 layers */ +#define RTE_BBDEV_MAX_MLD_LAYERS (4) +/* 12 SB per RB */ +#define RTE_BBDEV_SCPERRB (12) /* * Maximum size to be used to manage the enum rte_bbdev_op_type @@ -198,7 +203,9 @@ enum rte_bbdev_op_ldpcdec_flag_bitmasks { * for HARQ memory. If not set, it is assumed the filler bits are not * in HARQ memory and handled directly by the LDPC decoder. */ - RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS = (1ULL << 19) + RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS = (1ULL << 19), + /** Set if a device supports input/output HARQ 4bits compression. */ + RTE_BBDEV_LDPC_HARQ_4BIT_COMPRESSION = (1ULL << 20) }; /** Flags for LDPC encoder operation and capability structure */ @@ -238,7 +245,21 @@ enum rte_bbdev_op_fft_flag_bitmasks { /** Set if the input data used FP16 format. */ RTE_BBDEV_FFT_FP16_INPUT = (1ULL << 6), /** Set if the output data uses FP16 format. */ - RTE_BBDEV_FFT_FP16_OUTPUT = (1ULL << 7) + RTE_BBDEV_FFT_FP16_OUTPUT = (1ULL << 7), + /** Flexible adjustment of Timing offset adjustment per CS. */ + RTE_BBDEV_FFT_TIMING_OFFSET_PER_CS = (1ULL << 8), + /** Flexible adjustment of Timing error correction per CS. */ + RTE_BBDEV_FFT_TIMING_ERROR = (1ULL << 9), + /** Set for optional frequency domain dewindowing. */ + RTE_BBDEV_FFT_DEWINDOWING = (1ULL << 10), + /** Flexible adjustment of frequency resampling mode. */ + RTE_BBDEV_FFT_FREQ_RESAMPLING = (1ULL << 11) +}; + +/** Flags for MLDTS operation and capability structure */ +enum rte_bbdev_op_mldts_flag_bitmasks { + /** Set if the device supports C/R repetition options. */ + RTE_BBDEV_MLDTS_REP = (1ULL << 0), }; /** Flags for the Code Block/Transport block mode */ @@ -746,6 +767,8 @@ struct rte_bbdev_op_fft { struct rte_bbdev_op_data base_input; /** Output data starting from first antenna and first cyclic shift. */ struct rte_bbdev_op_data base_output; + /** Optional frequency window input data. */ + struct rte_bbdev_op_data dewindowing_input; /** Optional power measurement output data. */ struct rte_bbdev_op_data power_meas_output; /** Flags from rte_bbdev_op_fft_flag_bitmasks. */ @@ -780,9 +803,48 @@ struct rte_bbdev_op_fft { uint16_t power_shift; /** Adjust the FP6 exponent for INT<->FP16 conversion. */ uint16_t fp16_exp_adjust; + /** Frequency resampling : 0: Transparent Mode1: 4/3 Resample2: 2/3 Resample. */ + int8_t freq_resample_mode; + /** Output depadded size prior to frequency resampling. */ + uint16_t output_depadded_size; + /** Time error correction initial phase. */ + uint16_t cs_theta_0[RTE_BBDEV_MAX_CS]; + /** Time error correction phase increment. */ + uint32_t cs_theta_d[RTE_BBDEV_MAX_CS]; + /* Time offset per CS of time domain samples. 
*/ + int8_t time_offset[RTE_BBDEV_MAX_CS]; }; /* >8 End of structure rte_bbdev_op_fft. */ +/** Operation structure for MLDTS processing. + * + * The output mbuf data structure is expected to be allocated by the + * application with enough room for the output data. + */ + +/* Structure rte_bbdev_op_mldts 8< */ +struct rte_bbdev_op_mldts { + /** Input data QHy from QR decomposition. */ + struct rte_bbdev_op_data qhy_input; + /** Input data R from QR decomposition. */ + struct rte_bbdev_op_data r_input; + /** Output data post MLD-TS. */ + struct rte_bbdev_op_data output; + /** Flags from *rte_bbdev_op_MLDTS_flag_bitmasks*. */ + uint32_t op_flags; + /** Number of RBs. */ + uint16_t num_rbs; + /** Number of layers 2->4. */ + uint16_t num_layers; + /** Modulation order (2->8 QPSK to 256QAM). */ + uint8_t q_m[RTE_BBDEV_MAX_MLD_LAYERS]; + /** Row repetition for the same R matrix - subcarriers. */ + uint8_t r_rep; + /** Column repetition for the same R matrix - symbols. */ + uint8_t c_rep; +}; +/* >8 End of structure rte_bbdev_op_mldts. */ + /** List of the capabilities for the Turbo Decoder */ struct rte_bbdev_op_cap_turbo_dec { /** Flags from rte_bbdev_op_td_flag_bitmasks */ @@ -839,6 +901,18 @@ struct rte_bbdev_op_cap_ldpc_enc { struct rte_bbdev_op_cap_fft { /** Flags from *rte_bbdev_op_fft_flag_bitmasks*. */ uint32_t capability_flags; + /** Num input code block buffers. */ + uint16_t num_buffers_src; + /** Num output code block buffers. */ + uint16_t num_buffers_dst; + /** Number of FFT windows supported. */ + uint16_t fft_windows_num; +}; + +/** List of the capabilities for the MLD */ +struct rte_bbdev_op_cap_mld { + /** Flags from rte_bbdev_op_mldts_flag_bitmasks */ + uint32_t capability_flags; /** Number of input code block buffers. */ uint16_t num_buffers_src; /** Number of output code block buffers. */ @@ -856,6 +930,7 @@ enum rte_bbdev_op_type { RTE_BBDEV_OP_LDPC_DEC, /**< LDPC decode */ RTE_BBDEV_OP_LDPC_ENC, /**< LDPC encode */ RTE_BBDEV_OP_FFT, /**< FFT */ + RTE_BBDEV_OP_MLDTS, /**< MLD-TS */ /* Note: RTE_BBDEV_OP_TYPE_SIZE_MAX must be larger or equal to maximum enum value */ }; @@ -864,7 +939,8 @@ enum { RTE_BBDEV_DRV_ERROR, RTE_BBDEV_DATA_ERROR, RTE_BBDEV_CRC_ERROR, - RTE_BBDEV_SYNDROME_ERROR + RTE_BBDEV_SYNDROME_ERROR, + RTE_BBDEV_ENGINE_ERROR }; /** Structure specifying a single encode operation */ @@ -911,6 +987,18 @@ struct rte_bbdev_fft_op { struct rte_bbdev_op_fft fft; }; +/** Structure specifying a single mldts operation */ +struct rte_bbdev_mldts_op { + /** Status of operation that was performed. */ + int status; + /** Mempool which op instance is in. */ + struct rte_mempool *mempool; + /** Opaque pointer for user data. */ + void *opaque_data; + /** Contains turbo decoder specific parameters. */ + struct rte_bbdev_op_mldts mldts; +}; + /** Operation capabilities supported by a device */ struct rte_bbdev_op_cap { enum rte_bbdev_op_type type; /**< Type of operation */ @@ -920,6 +1008,7 @@ struct rte_bbdev_op_cap { struct rte_bbdev_op_cap_ldpc_dec ldpc_dec; struct rte_bbdev_op_cap_ldpc_enc ldpc_enc; struct rte_bbdev_op_cap_fft fft; + struct rte_bbdev_op_cap_mld mld; } cap; /**< Operation-type specific capabilities */ }; @@ -1057,6 +1146,36 @@ rte_bbdev_fft_op_alloc_bulk(struct rte_mempool *mempool, return rte_mempool_get_bulk(mempool, (void **)ops, num_ops); } +/** + * Bulk allocate MLD operations from a mempool with parameter defaults reset. + * + * @param mempool + * Operation mempool, created by *rte_bbdev_op_pool_create*. 
+ * @param ops + * Output array to place allocated operations. + * @param num_ops + * Number of operations to allocate. + * + * @returns + * - 0 on success. + * - EINVAL if invalid mempool is provided. + */ +__rte_experimental +static inline int +rte_bbdev_mldts_op_alloc_bulk(struct rte_mempool *mempool, + struct rte_bbdev_mldts_op **ops, uint16_t num_ops) +{ + struct rte_bbdev_op_pool_private *priv; + + /* Check type */ + priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mempool); + if (unlikely(priv->type != RTE_BBDEV_OP_MLDTS)) + return -EINVAL; + + /* Get elements */ + return rte_mempool_get_bulk(mempool, (void **)ops, num_ops); +} + /** * Free decode operation structures that were allocated by * rte_bbdev_dec_op_alloc_bulk(). @@ -1109,6 +1228,24 @@ rte_bbdev_fft_op_free_bulk(struct rte_bbdev_fft_op **ops, unsigned int num_ops) rte_mempool_put_bulk(ops[0]->mempool, (void **)ops, num_ops); } +/** + * Free encode operation structures that were allocated by + * rte_bbdev_mldts_op_alloc_bulk(). + * All structures must belong to the same mempool. + * + * @param ops + * Operation structures + * @param num_ops + * Number of structures + */ +__rte_experimental +static inline void +rte_bbdev_mldts_op_free_bulk(struct rte_bbdev_mldts_op **ops, unsigned int num_ops) +{ + if (num_ops > 0) + rte_mempool_put_bulk(ops[0]->mempool, (void **)ops, num_ops); +} + #ifdef __cplusplus } #endif diff --git a/lib/bbdev/version.map b/lib/bbdev/version.map index 4f4bfbbd5ea..eeb7ed475a6 100644 --- a/lib/bbdev/version.map +++ b/lib/bbdev/version.map @@ -50,4 +50,9 @@ EXPERIMENTAL { rte_bbdev_enqueue_status_str; rte_bbdev_fft_op_alloc_bulk; rte_bbdev_fft_op_free_bulk; + #added in 23.11 + rte_bbdev_dequeue_mldts_ops; + rte_bbdev_enqueue_mldts_ops; + rte_bbdev_mldts_op_alloc_bulk; + rte_bbdev_mldts_op_free_bulk; }; diff --git a/lib/bpf/bpf_pkt.c b/lib/bpf/bpf_pkt.c index ffd2db78402..7a8e4a6ef4f 100644 --- a/lib/bpf/bpf_pkt.c +++ b/lib/bpf/bpf_pkt.c @@ -25,7 +25,7 @@ struct bpf_eth_cbi { /* used by both data & control path */ - uint32_t use; /*usage counter */ + RTE_ATOMIC(uint32_t) use; /*usage counter */ const struct rte_eth_rxtx_callback *cb; /* callback handle */ struct rte_bpf *bpf; struct rte_bpf_jit jit; @@ -110,8 +110,8 @@ bpf_eth_cbi_wait(const struct bpf_eth_cbi *cbi) /* in use, busy wait till current RX/TX iteration is finished */ if ((puse & BPF_ETH_CBI_INUSE) != 0) { - RTE_WAIT_UNTIL_MASKED((uint32_t *)(uintptr_t)&cbi->use, - UINT32_MAX, !=, puse, __ATOMIC_RELAXED); + RTE_WAIT_UNTIL_MASKED((__rte_atomic uint32_t *)(uintptr_t)&cbi->use, + UINT32_MAX, !=, puse, rte_memory_order_relaxed); } } diff --git a/lib/bpf/bpf_validate.c b/lib/bpf/bpf_validate.c index 119dc4d3aa7..95b9ef99eff 100644 --- a/lib/bpf/bpf_validate.c +++ b/lib/bpf/bpf_validate.c @@ -370,7 +370,7 @@ eval_umax_bits(uint64_t v, size_t opsz) if (v == 0) return 0; - v = __builtin_clzll(v); + v = rte_clz64(v); return RTE_LEN2MASK(opsz - v, uint64_t); } diff --git a/lib/cmdline/cmdline.c b/lib/cmdline/cmdline.c index 355c7d8ca63..08721ee919b 100644 --- a/lib/cmdline/cmdline.c +++ b/lib/cmdline/cmdline.c @@ -177,40 +177,6 @@ cmdline_quit(struct cmdline *cl) rdline_quit(&cl->rdl); } -int -cmdline_poll(struct cmdline *cl) -{ - int status; - ssize_t read_status; - char c; - - if (!cl) - return -EINVAL; - else if (cl->rdl.status == RDLINE_EXITED) - return RDLINE_EXITED; - - status = cmdline_poll_char(cl); - if (status < 0) - return status; - else if (status > 0) { - c = -1; - read_status = cmdline_read_char(cl, &c); - if (read_status < 
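
For illustration (not part of the patch): the new MLD-TS helpers added above (and exported as experimental in version.map) follow the usual bbdev alloc/enqueue/dequeue/free pattern. A minimal sketch, assuming device and queue 0, a mempool created with rte_bbdev_op_pool_create() for RTE_BBDEV_OP_MLDTS, and a build that allows experimental API; the burst size is hypothetical.

#include <rte_bbdev.h>
#include <rte_bbdev_op.h>

#define MLDTS_BURST 16

static int
run_mldts_burst(struct rte_mempool *mldts_pool)
{
    struct rte_bbdev_mldts_op *ops[MLDTS_BURST];
    uint16_t nb_enq, nb_deq = 0;

    if (rte_bbdev_mldts_op_alloc_bulk(mldts_pool, ops, MLDTS_BURST) != 0)
        return -1;

    /* fill ops[i]->mldts.qhy_input / r_input / output buffers here */

    nb_enq = rte_bbdev_enqueue_mldts_ops(0, 0, ops, MLDTS_BURST);
    while (nb_deq < nb_enq)
        nb_deq += rte_bbdev_dequeue_mldts_ops(0, 0, &ops[nb_deq],
                nb_enq - nb_deq);

    /* every allocated op is still referenced by the array, free them all */
    rte_bbdev_mldts_op_free_bulk(ops, MLDTS_BURST);
    return 0;
}
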
0) - return read_status; - - if (read_status == 0) { - /* end of file is implicit quit */ - cmdline_quit(cl); - } else { - status = cmdline_in(cl, &c, 1); - if (status < 0 && cl->rdl.status != RDLINE_EXITED) - return status; - } - } - - return cl->rdl.status; -} - void cmdline_interact(struct cmdline *cl) { diff --git a/lib/cmdline/cmdline.h b/lib/cmdline/cmdline.h index d631cd4bd45..992c8459145 100644 --- a/lib/cmdline/cmdline.h +++ b/lib/cmdline/cmdline.h @@ -43,22 +43,6 @@ __rte_experimental struct rdline * cmdline_get_rdline(struct cmdline *cl); -/** - * @deprecated Function is broken and scheduled for removal. - * - * This function is nonblocking equivalent of ``cmdline_interact()``. It polls - * *cl* for one character and interpret it. If return value is *RDLINE_EXITED* - * it mean that ``cmdline_quit()`` was invoked. - * - * @param cl - * The command line object. - * - * @return - * On success return object status - one of *enum rdline_status*. - * On error return negative value. - */ -__rte_deprecated -int cmdline_poll(struct cmdline *cl); void cmdline_interact(struct cmdline *cl); void cmdline_quit(struct cmdline *cl); diff --git a/lib/cmdline/cmdline_os_unix.c b/lib/cmdline/cmdline_os_unix.c index 9a4ec4e3347..0365e73c115 100644 --- a/lib/cmdline/cmdline_os_unix.c +++ b/lib/cmdline/cmdline_os_unix.c @@ -28,18 +28,6 @@ terminal_restore(const struct cmdline *cl) tcsetattr(fileno(stdin), TCSANOW, &cl->oldterm); } -int -cmdline_poll_char(struct cmdline *cl) -{ - struct pollfd pfd; - - pfd.fd = cl->s_in; - pfd.events = POLLIN; - pfd.revents = 0; - - return poll(&pfd, 1, 0); -} - ssize_t cmdline_read_char(struct cmdline *cl, char *c) { diff --git a/lib/cmdline/cmdline_os_windows.c b/lib/cmdline/cmdline_os_windows.c index 80863bfc8a0..74dc8a18db4 100644 --- a/lib/cmdline/cmdline_os_windows.c +++ b/lib/cmdline/cmdline_os_windows.c @@ -72,74 +72,6 @@ cmdline_is_key_down(const INPUT_RECORD *record) record->Event.KeyEvent.bKeyDown; } -static int -cmdline_poll_char_console(HANDLE handle) -{ - INPUT_RECORD record; - DWORD events; - - if (!PeekConsoleInput(handle, &record, 1, &events)) { - /* Simulate poll(3) behavior on EOF. */ - return (GetLastError() == ERROR_HANDLE_EOF) ? 1 : -1; - } - - if ((events == 0) || !cmdline_is_key_down(&record)) - return 0; - - return 1; -} - -static int -cmdline_poll_char_file(struct cmdline *cl, HANDLE handle) -{ - DWORD type = GetFileType(handle); - - /* Since console is handled by cmdline_poll_char_console(), - * this is either a serial port or input handle had been replaced. - */ - if (type == FILE_TYPE_CHAR) - return cmdline_poll_char_console(handle); - - /* PeekNamedPipe() can handle all pipes and also sockets. */ - if (type == FILE_TYPE_PIPE) { - DWORD bytes_avail; - if (!PeekNamedPipe(handle, NULL, 0, NULL, &bytes_avail, NULL)) - return (GetLastError() == ERROR_BROKEN_PIPE) ? 1 : -1; - return bytes_avail ? 1 : 0; - } - - /* There is no straightforward way to peek a file in Windows - * I/O model. Read the byte, if it is not the end of file, - * buffer it for subsequent read. This will not work with - * a file being appended and probably some other edge cases. - */ - if (type == FILE_TYPE_DISK) { - char c; - int ret; - - ret = _read(cl->s_in, &c, sizeof(c)); - if (ret == 1) { - cl->repeat_count = 1; - cl->repeated_char = c; - } - return ret; - } - - /* GetFileType() failed or file of unknown type, - * which we do not know how to peek anyway. 
- */ - return -1; -} - -int -cmdline_poll_char(struct cmdline *cl) -{ - HANDLE handle = (HANDLE)_get_osfhandle(cl->s_in); - return cl->oldterm.is_console_input ? - cmdline_poll_char_console(handle) : - cmdline_poll_char_file(cl, handle); -} - ssize_t cmdline_read_char(struct cmdline *cl, char *c) { diff --git a/lib/cmdline/cmdline_private.h b/lib/cmdline/cmdline_private.h index 86a46cdea61..b64f363903c 100644 --- a/lib/cmdline/cmdline_private.h +++ b/lib/cmdline/cmdline_private.h @@ -84,9 +84,6 @@ void terminal_adjust(struct cmdline *cl); /* Restore terminal settings form oldterm. */ void terminal_restore(const struct cmdline *cl); -/* Check if a single character can be read from input. */ -int cmdline_poll_char(struct cmdline *cl); - /* Read one character from input. */ ssize_t cmdline_read_char(struct cmdline *cl, char *c); diff --git a/lib/cmdline/version.map b/lib/cmdline/version.map index db4d904ffbd..97166789016 100644 --- a/lib/cmdline/version.map +++ b/lib/cmdline/version.map @@ -40,7 +40,6 @@ DPDK_24 { cmdline_parse_num; cmdline_parse_portlist; cmdline_parse_string; - cmdline_poll; cmdline_printf; cmdline_quit; cmdline_set_prompt; diff --git a/lib/cryptodev/cryptodev_trace.h b/lib/cryptodev/cryptodev_trace.h index aab44af96b7..935f0d564b8 100644 --- a/lib/cryptodev/cryptodev_trace.h +++ b/lib/cryptodev/cryptodev_trace.h @@ -520,6 +520,15 @@ RTE_TRACE_POINT( rte_trace_point_emit_int(ret); ) +RTE_TRACE_POINT( + rte_cryptodev_trace_asym_xform_capability_check_hash, + RTE_TRACE_POINT_ARGS(uint64_t hash_algos, + enum rte_crypto_auth_algorithm hash, int ret), + rte_trace_point_emit_u64(hash_algos); + rte_trace_point_emit_int(hash); + rte_trace_point_emit_int(ret); +) + RTE_TRACE_POINT( rte_cryptodev_trace_count, RTE_TRACE_POINT_ARGS(uint8_t nb_devs), diff --git a/lib/cryptodev/cryptodev_trace_points.c b/lib/cryptodev/cryptodev_trace_points.c index e2303fdb522..8c47ab1e78f 100644 --- a/lib/cryptodev/cryptodev_trace_points.c +++ b/lib/cryptodev/cryptodev_trace_points.c @@ -144,6 +144,9 @@ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_asym_xform_capability_check_modlen, RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_asym_xform_capability_check_optype, lib.cryptodev.asym.xform.capability.check.optype) +RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_asym_xform_capability_check_hash, + lib.cryptodev.asym.xform.capability.check.hash) + RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_sym_cpu_crypto_process, lib.cryptodev.sym.cpu.crypto.process) diff --git a/lib/cryptodev/rte_crypto.h b/lib/cryptodev/rte_crypto.h index 9b8d0331a4a..dbc2700da58 100644 --- a/lib/cryptodev/rte_crypto.h +++ b/lib/cryptodev/rte_crypto.h @@ -64,9 +64,18 @@ enum rte_crypto_op_sess_type { RTE_CRYPTO_OP_SECURITY_SESSION /**< Security session crypto operation */ }; +/* Auxiliary flags related to crypto operation */ +#define RTE_CRYPTO_OP_AUX_FLAGS_SESS_SOFT_EXPIRY (1 << 0) +/**< Session soft expiry limit has been reached. + * Applicable for any session that has a soft lifetime feature supported. + * + * @see rte_security_ipsec_lifetime + * @see rte_security_tls_record_lifetime + */ + /* Auxiliary flags related to IPsec offload with RTE_SECURITY */ -#define RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY (1 << 0) +#define RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY RTE_CRYPTO_OP_AUX_FLAGS_SESS_SOFT_EXPIRY /**< SA soft expiry limit has been reached */ /** @@ -99,10 +108,45 @@ struct rte_crypto_op { /**< operation session type */ uint8_t aux_flags; /**< Operation specific auxiliary/additional flags. 
- * These flags carry additional information from the + * These flags carry additional information from/to the * operation. Processing of the same is optional. + * + * The flags are defined as RTE_CRYPTO_OP_AUX_FLAGS_* and + * would be set by PMD for application consumption when + * the status is RTE_CRYPTO_OP_STATUS_SUCCESS. + * In case of errors, the value of this field is undefined. + * + * With TLS record offload (RTE_SECURITY_PROTOCOL_TLS_RECORD), + * application may provide the extra padding required for the plaintext + * provided. This field can be used for passing the same in units of 8B. + * The value would be set by application for PMD consumption. + * + * @see struct rte_security_tls_record_sess_options */ - uint8_t reserved[2]; + union { + struct { + uint8_t content_type; + /**< Content type. The field can act both as input + * and output. + * + * As input, for passing message type in case of record + * write (encrypt) operation. Applicable for, + * 1. TLS 1.2 + * 2. TLS 1.3 + * 3. DTLS 1.2 + * + * As output, for returning message type in case of record + * read (decrypt) operation. Applicable for, + * 1. TLS 1.3 + * + * Message types are listed as RTE_TLS_TYPE_* and + * RTE_DTLS_TYPE_*. + */ + } tls_record; + /**< TLS record */ + } param1; + /**< Additional per operation parameter 1. */ + uint8_t reserved[1]; /**< Reserved bytes to fill 64 bits for * future additions */ diff --git a/lib/cryptodev/rte_crypto_asym.h b/lib/cryptodev/rte_crypto_asym.h index cbcfe1dc26f..39d3da39529 100644 --- a/lib/cryptodev/rte_crypto_asym.h +++ b/lib/cryptodev/rte_crypto_asym.h @@ -29,11 +29,6 @@ extern "C" { struct rte_cryptodev_asym_session; -/** asym xform type name strings */ -__rte_deprecated -extern const char * -rte_crypto_asym_xform_strings[]; - /** asym key exchange operation type name strings */ extern const char * rte_crypto_asym_ke_strings[]; @@ -69,7 +64,8 @@ enum rte_crypto_curve_id { RTE_CRYPTO_EC_GROUP_SECP224R1 = 21, RTE_CRYPTO_EC_GROUP_SECP256R1 = 23, RTE_CRYPTO_EC_GROUP_SECP384R1 = 24, - RTE_CRYPTO_EC_GROUP_SECP521R1 = 25 + RTE_CRYPTO_EC_GROUP_SECP521R1 = 25, + RTE_CRYPTO_EC_GROUP_SM2 = 41, }; /** @@ -376,16 +372,12 @@ struct rte_crypto_dsa_xform { struct rte_crypto_ec_xform { enum rte_crypto_curve_id curve_id; /**< Pre-defined ec groups */ -}; -/** - * Asymmetric SM2 transform data. - * - * Structure describing SM2 xform params. - */ -struct rte_crypto_sm2_xform { - enum rte_crypto_auth_algorithm hash; - /**< Hash algorithm used in SM2 op. */ + rte_crypto_uint pkey; + /**< Private key */ + + struct rte_crypto_ec_point q; + /**< Public key */ }; /** @@ -571,12 +563,6 @@ struct rte_crypto_ecdsa_op_param { enum rte_crypto_asym_op_type op_type; /**< Signature generation or verification */ - rte_crypto_uint pkey; - /**< Private key of the signer for signature generation */ - - struct rte_crypto_ec_point q; - /**< Public key of the signer for verification */ - rte_crypto_param message; /**< Input message digest to be signed or verified */ @@ -644,9 +630,6 @@ struct rte_crypto_asym_xform { /**< EC xform parameters, used by elliptic curve based * operations. */ - - struct rte_crypto_sm2_xform sm2; - /**< SM2 xform parameters */ }; }; @@ -657,11 +640,8 @@ struct rte_crypto_sm2_op_param { enum rte_crypto_asym_op_type op_type; /**< Signature generation or verification. */ - rte_crypto_uint pkey; - /**< Private key for encryption or sign generation. */ - - struct rte_crypto_ec_point q; - /**< Public key for decryption or verification. 
*/ + enum rte_crypto_auth_algorithm hash; + /**< Hash algorithm used in EC op. */ rte_crypto_param message; /**< diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h index bde8010fdda..5be0f053973 100644 --- a/lib/cryptodev/rte_crypto_sym.h +++ b/lib/cryptodev/rte_crypto_sym.h @@ -180,11 +180,6 @@ enum rte_crypto_cipher_algorithm { /**< ShangMi 4 (SM4) algorithm in CFB mode */ }; -/** Cipher algorithm name strings */ -__rte_deprecated -extern const char * -rte_crypto_cipher_algorithm_strings[]; - /** Symmetric Cipher Direction */ enum rte_crypto_cipher_operation { RTE_CRYPTO_CIPHER_OP_ENCRYPT, @@ -389,11 +384,6 @@ enum rte_crypto_auth_algorithm { /** < HMAC using ShangMi 3 (SM3) algorithm */ }; -/** Authentication algorithm name strings */ -__rte_deprecated -extern const char * -rte_crypto_auth_algorithm_strings[]; - /** Symmetric Authentication / Hash Operations */ enum rte_crypto_auth_operation { RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */ @@ -494,11 +484,6 @@ enum rte_crypto_aead_algorithm { /**< Chacha20 cipher with poly1305 authenticator */ }; -/** AEAD algorithm name strings */ -__rte_deprecated -extern const char * -rte_crypto_aead_algorithm_strings[]; - /** Symmetric AEAD Operations */ enum rte_crypto_aead_operation { RTE_CRYPTO_AEAD_OP_ENCRYPT, diff --git a/lib/cryptodev/rte_cryptodev.c b/lib/cryptodev/rte_cryptodev.c index c49d342b175..314710b5f43 100644 --- a/lib/cryptodev/rte_cryptodev.c +++ b/lib/cryptodev/rte_cryptodev.c @@ -63,40 +63,6 @@ struct rte_cryptodev_callback { uint32_t active; /**< Callback is executing */ }; -/** - * @deprecated - * The crypto cipher algorithm strings identifiers. - * It could be used in application command line. - */ -__rte_deprecated -const char * -rte_crypto_cipher_algorithm_strings[] = { - [RTE_CRYPTO_CIPHER_3DES_CBC] = "3des-cbc", - [RTE_CRYPTO_CIPHER_3DES_ECB] = "3des-ecb", - [RTE_CRYPTO_CIPHER_3DES_CTR] = "3des-ctr", - - [RTE_CRYPTO_CIPHER_AES_CBC] = "aes-cbc", - [RTE_CRYPTO_CIPHER_AES_CTR] = "aes-ctr", - [RTE_CRYPTO_CIPHER_AES_DOCSISBPI] = "aes-docsisbpi", - [RTE_CRYPTO_CIPHER_AES_ECB] = "aes-ecb", - [RTE_CRYPTO_CIPHER_AES_F8] = "aes-f8", - [RTE_CRYPTO_CIPHER_AES_XTS] = "aes-xts", - - [RTE_CRYPTO_CIPHER_ARC4] = "arc4", - - [RTE_CRYPTO_CIPHER_DES_CBC] = "des-cbc", - [RTE_CRYPTO_CIPHER_DES_DOCSISBPI] = "des-docsisbpi", - - [RTE_CRYPTO_CIPHER_NULL] = "null", - - [RTE_CRYPTO_CIPHER_KASUMI_F8] = "kasumi-f8", - [RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2", - [RTE_CRYPTO_CIPHER_ZUC_EEA3] = "zuc-eea3", - [RTE_CRYPTO_CIPHER_SM4_ECB] = "sm4-ecb", - [RTE_CRYPTO_CIPHER_SM4_CBC] = "sm4-cbc", - [RTE_CRYPTO_CIPHER_SM4_CTR] = "sm4-ctr" -}; - /** * The crypto cipher algorithm strings identifiers. * Not to be used in application directly. @@ -142,51 +108,6 @@ rte_crypto_cipher_operation_strings[] = { [RTE_CRYPTO_CIPHER_OP_DECRYPT] = "decrypt" }; -/** - * @deprecated - * The crypto auth algorithm strings identifiers. - * It could be used in application command line. 
- */ -__rte_deprecated -const char * -rte_crypto_auth_algorithm_strings[] = { - [RTE_CRYPTO_AUTH_AES_CBC_MAC] = "aes-cbc-mac", - [RTE_CRYPTO_AUTH_AES_CMAC] = "aes-cmac", - [RTE_CRYPTO_AUTH_AES_GMAC] = "aes-gmac", - [RTE_CRYPTO_AUTH_AES_XCBC_MAC] = "aes-xcbc-mac", - - [RTE_CRYPTO_AUTH_MD5] = "md5", - [RTE_CRYPTO_AUTH_MD5_HMAC] = "md5-hmac", - - [RTE_CRYPTO_AUTH_NULL] = "null", - - [RTE_CRYPTO_AUTH_SHA1] = "sha1", - [RTE_CRYPTO_AUTH_SHA1_HMAC] = "sha1-hmac", - - [RTE_CRYPTO_AUTH_SHA224] = "sha2-224", - [RTE_CRYPTO_AUTH_SHA224_HMAC] = "sha2-224-hmac", - [RTE_CRYPTO_AUTH_SHA256] = "sha2-256", - [RTE_CRYPTO_AUTH_SHA256_HMAC] = "sha2-256-hmac", - [RTE_CRYPTO_AUTH_SHA384] = "sha2-384", - [RTE_CRYPTO_AUTH_SHA384_HMAC] = "sha2-384-hmac", - [RTE_CRYPTO_AUTH_SHA512] = "sha2-512", - [RTE_CRYPTO_AUTH_SHA512_HMAC] = "sha2-512-hmac", - - [RTE_CRYPTO_AUTH_SHA3_224] = "sha3-224", - [RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac", - [RTE_CRYPTO_AUTH_SHA3_256] = "sha3-256", - [RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac", - [RTE_CRYPTO_AUTH_SHA3_384] = "sha3-384", - [RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac", - [RTE_CRYPTO_AUTH_SHA3_512] = "sha3-512", - [RTE_CRYPTO_AUTH_SHA3_512_HMAC] = "sha3-512-hmac", - - [RTE_CRYPTO_AUTH_KASUMI_F9] = "kasumi-f9", - [RTE_CRYPTO_AUTH_SNOW3G_UIA2] = "snow3g-uia2", - [RTE_CRYPTO_AUTH_ZUC_EIA3] = "zuc-eia3", - [RTE_CRYPTO_AUTH_SM3] = "sm3" -}; - /** * The crypto auth algorithm strings identifiers. * Not to be used in application directly. @@ -235,19 +156,6 @@ crypto_auth_algorithm_strings[] = { [RTE_CRYPTO_AUTH_SHAKE_256] = "shake-256", }; -/** - * @deprecated - * The crypto AEAD algorithm strings identifiers. - * It could be used in application command line. - */ -__rte_deprecated -const char * -rte_crypto_aead_algorithm_strings[] = { - [RTE_CRYPTO_AEAD_AES_CCM] = "aes-ccm", - [RTE_CRYPTO_AEAD_AES_GCM] = "aes-gcm", - [RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305" -}; - /** * The crypto AEAD algorithm strings identifiers. * Not to be used in application directly. @@ -271,22 +179,6 @@ rte_crypto_aead_operation_strings[] = { [RTE_CRYPTO_AEAD_OP_DECRYPT] = "decrypt" }; -/** - * @deprecated - * Asymmetric crypto transform operation strings identifiers. - */ -__rte_deprecated -const char *rte_crypto_asym_xform_strings[] = { - [RTE_CRYPTO_ASYM_XFORM_NONE] = "none", - [RTE_CRYPTO_ASYM_XFORM_RSA] = "rsa", - [RTE_CRYPTO_ASYM_XFORM_MODEX] = "modexp", - [RTE_CRYPTO_ASYM_XFORM_MODINV] = "modinv", - [RTE_CRYPTO_ASYM_XFORM_DH] = "dh", - [RTE_CRYPTO_ASYM_XFORM_DSA] = "dsa", - [RTE_CRYPTO_ASYM_XFORM_ECDSA] = "ecdsa", - [RTE_CRYPTO_ASYM_XFORM_ECPM] = "ecpm", -}; - /** * Asymmetric crypto transform operation strings identifiers. * Not to be used in application directly. 
@@ -718,6 +610,22 @@ rte_cryptodev_asym_xform_capability_check_modlen( return ret; } +bool +rte_cryptodev_asym_xform_capability_check_hash( + const struct rte_cryptodev_asymmetric_xform_capability *capability, + enum rte_crypto_auth_algorithm hash) +{ + bool ret = false; + + if (capability->hash_algos & (1 << hash)) + ret = true; + + rte_cryptodev_trace_asym_xform_capability_check_hash( + capability->hash_algos, hash, ret); + + return ret; +} + /* spinlock for crypto device enq callbacks */ static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER; diff --git a/lib/cryptodev/rte_cryptodev.h b/lib/cryptodev/rte_cryptodev.h index 9246df90efc..be0698ce9fc 100644 --- a/lib/cryptodev/rte_cryptodev.h +++ b/lib/cryptodev/rte_cryptodev.h @@ -181,7 +181,16 @@ struct rte_cryptodev_asymmetric_xform_capability { /**< Range of modulus length supported by modulus based xform. * Value 0 mean implementation default */ + + uint8_t internal_rng; + /**< Availability of random number generator for Elliptic curve based xform. + * Value 0 means unavailable, and application should pass the required + * random value. Otherwise, PMD would internally compute the random number. + */ }; + + uint64_t hash_algos; + /**< Bitmask of hash algorithms supported for op_type. */ }; /** @@ -340,6 +349,22 @@ rte_cryptodev_asym_xform_capability_check_modlen( const struct rte_cryptodev_asymmetric_xform_capability *capability, uint16_t modlen); +/** + * Check if hash algorithm is supported. + * + * @param capability Asymmetric crypto capability. + * @param hash Hash algorithm. + * + * @return + * - Return true if the hash algorithm is supported. + * - Return false if the hash algorithm is not supported. + */ +__rte_experimental +bool +rte_cryptodev_asym_xform_capability_check_hash( + const struct rte_cryptodev_asymmetric_xform_capability *capability, + enum rte_crypto_auth_algorithm hash); + /** * Provide the cipher algorithm enum, given an algorithm string * @@ -534,6 +559,8 @@ rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum); /**< Support wrapped key in cipher xform */ #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM (1ULL << 27) /**< Support inner checksum computation/verification */ +#define RTE_CRYPTODEV_FF_SECURITY_RX_INJECT (1ULL << 28) +/**< Support Rx injection after security processing */ /** * Get the name of a crypto device feature flag @@ -971,6 +998,15 @@ struct rte_cryptodev_cb_rcu { /**< RCU QSBR variable per queue pair */ }; +/** + * Get the security context for the cryptodev. + * + * @param dev_id + * The device identifier. + * @return + * - NULL on error. + * - Pointer to security context on success. 
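+ *
+ * Minimal usage sketch (illustrative only; handle_no_security() is a
+ * hypothetical application helper):
+ * @code
+ * void *sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);
+ * if (sec_ctx == NULL)
+ *	handle_no_security(dev_id);
+ * @endcode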
+ */ void * rte_cryptodev_get_sec_ctx(uint8_t dev_id); diff --git a/lib/cryptodev/version.map b/lib/cryptodev/version.map index ae8d9327b4f..208919b8195 100644 --- a/lib/cryptodev/version.map +++ b/lib/cryptodev/version.map @@ -1,11 +1,8 @@ DPDK_24 { global: - rte_crypto_aead_algorithm_strings; rte_crypto_aead_operation_strings; - rte_crypto_auth_algorithm_strings; rte_crypto_auth_operation_strings; - rte_crypto_cipher_algorithm_strings; rte_crypto_cipher_operation_strings; rte_crypto_fp_ops; rte_crypto_op_pool_create; @@ -54,6 +51,7 @@ EXPERIMENTAL { rte_cryptodev_asym_get_xform_enum; rte_cryptodev_asym_session_create; rte_cryptodev_asym_session_free; + rte_cryptodev_asym_xform_capability_check_hash; rte_cryptodev_asym_xform_capability_check_modlen; rte_cryptodev_asym_xform_capability_check_optype; rte_cryptodev_sym_cpu_crypto_process; @@ -61,7 +59,6 @@ EXPERIMENTAL { rte_cryptodev_sym_session_pool_create; rte_cryptodev_sym_session_set_user_data; rte_crypto_asym_op_strings; - rte_crypto_asym_xform_strings; # added in 20.05 __rte_cryptodev_trace_dequeue_burst; diff --git a/lib/dispatcher/meson.build b/lib/dispatcher/meson.build new file mode 100644 index 00000000000..ffaef26a6d7 --- /dev/null +++ b/lib/dispatcher/meson.build @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2023 Ericsson AB + +if is_windows + build = false + reason = 'not supported on Windows' + subdir_done() +endif + +sources = files('rte_dispatcher.c') +headers = files('rte_dispatcher.h') + +deps += ['eventdev'] diff --git a/lib/dispatcher/rte_dispatcher.c b/lib/dispatcher/rte_dispatcher.c new file mode 100644 index 00000000000..10d02edde90 --- /dev/null +++ b/lib/dispatcher/rte_dispatcher.c @@ -0,0 +1,694 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Ericsson AB + */ + +#include +#include + +#include +#include +#include +#include +#include + +#include "eventdev_pmd.h" + +#include + +#define EVD_MAX_PORTS_PER_LCORE 4 +#define EVD_MAX_HANDLERS 32 +#define EVD_MAX_FINALIZERS 16 +#define EVD_AVG_PRIO_INTERVAL 2000 +#define EVD_SERVICE_NAME "dispatcher" + +struct rte_dispatcher_lcore_port { + uint8_t port_id; + uint16_t batch_size; + uint64_t timeout; +}; + +struct rte_dispatcher_handler { + int id; + rte_dispatcher_match_t match_fun; + void *match_data; + rte_dispatcher_process_t process_fun; + void *process_data; +}; + +struct rte_dispatcher_finalizer { + int id; + rte_dispatcher_finalize_t finalize_fun; + void *finalize_data; +}; + +struct rte_dispatcher_lcore { + uint8_t num_ports; + uint16_t num_handlers; + int32_t prio_count; + struct rte_dispatcher_lcore_port ports[EVD_MAX_PORTS_PER_LCORE]; + struct rte_dispatcher_handler handlers[EVD_MAX_HANDLERS]; + struct rte_dispatcher_stats stats; +} __rte_cache_aligned; + +struct rte_dispatcher { + uint8_t event_dev_id; + int socket_id; + uint32_t service_id; + struct rte_dispatcher_lcore lcores[RTE_MAX_LCORE]; + uint16_t num_finalizers; + struct rte_dispatcher_finalizer finalizers[EVD_MAX_FINALIZERS]; +}; + +static int +evd_lookup_handler_idx(struct rte_dispatcher_lcore *lcore, + const struct rte_event *event) +{ + uint16_t i; + + for (i = 0; i < lcore->num_handlers; i++) { + struct rte_dispatcher_handler *handler = + &lcore->handlers[i]; + + if (handler->match_fun(event, handler->match_data)) + return i; + } + + return -1; +} + +static void +evd_prioritize_handler(struct rte_dispatcher_lcore *lcore, + int handler_idx) +{ + struct rte_dispatcher_handler tmp; + + if (handler_idx == 0) + return; + + /* Let the lucky handler "bubble" up 
the list */ + + tmp = lcore->handlers[handler_idx - 1]; + lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx]; + lcore->handlers[handler_idx] = tmp; +} + +static inline void +evd_consider_prioritize_handler(struct rte_dispatcher_lcore *lcore, + int handler_idx, uint16_t handler_events) +{ + lcore->prio_count -= handler_events; + + if (unlikely(lcore->prio_count <= 0)) { + evd_prioritize_handler(lcore, handler_idx); + + /* + * Randomize the interval in the unlikely case + * the traffic follow some very strict pattern. + */ + lcore->prio_count = + rte_rand_max(EVD_AVG_PRIO_INTERVAL) + + EVD_AVG_PRIO_INTERVAL / 2; + } +} + +static inline void +evd_dispatch_events(struct rte_dispatcher *dispatcher, + struct rte_dispatcher_lcore *lcore, + struct rte_dispatcher_lcore_port *port, + struct rte_event *events, uint16_t num_events) +{ + int i; + struct rte_event bursts[EVD_MAX_HANDLERS][num_events]; + uint16_t burst_lens[EVD_MAX_HANDLERS] = { 0 }; + uint16_t drop_count = 0; + uint16_t dispatch_count; + uint16_t dispatched = 0; + + for (i = 0; i < num_events; i++) { + struct rte_event *event = &events[i]; + int handler_idx; + + handler_idx = evd_lookup_handler_idx(lcore, event); + + if (unlikely(handler_idx < 0)) { + drop_count++; + continue; + } + + bursts[handler_idx][burst_lens[handler_idx]] = *event; + burst_lens[handler_idx]++; + } + + dispatch_count = num_events - drop_count; + + for (i = 0; i < lcore->num_handlers && + dispatched < dispatch_count; i++) { + struct rte_dispatcher_handler *handler = + &lcore->handlers[i]; + uint16_t len = burst_lens[i]; + + if (len == 0) + continue; + + handler->process_fun(dispatcher->event_dev_id, port->port_id, + bursts[i], len, handler->process_data); + + dispatched += len; + + /* + * Safe, since any reshuffling will only involve + * already-processed handlers. 
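+ * Only handlers[i] and handlers[i - 1] can be swapped here,
+ * so handlers at higher indices keep their pairing with bursts[].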
+ */ + evd_consider_prioritize_handler(lcore, i, len); + } + + lcore->stats.ev_batch_count++; + lcore->stats.ev_dispatch_count += dispatch_count; + lcore->stats.ev_drop_count += drop_count; + + for (i = 0; i < dispatcher->num_finalizers; i++) { + struct rte_dispatcher_finalizer *finalizer = + &dispatcher->finalizers[i]; + + finalizer->finalize_fun(dispatcher->event_dev_id, + port->port_id, + finalizer->finalize_data); + } +} + +static __rte_always_inline uint16_t +evd_port_dequeue(struct rte_dispatcher *dispatcher, + struct rte_dispatcher_lcore *lcore, + struct rte_dispatcher_lcore_port *port) +{ + uint16_t batch_size = port->batch_size; + struct rte_event events[batch_size]; + uint16_t n; + + n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id, + events, batch_size, port->timeout); + + if (likely(n > 0)) + evd_dispatch_events(dispatcher, lcore, port, events, n); + + lcore->stats.poll_count++; + + return n; +} + +static __rte_always_inline uint16_t +evd_lcore_process(struct rte_dispatcher *dispatcher, + struct rte_dispatcher_lcore *lcore) +{ + uint16_t i; + uint16_t event_count = 0; + + for (i = 0; i < lcore->num_ports; i++) { + struct rte_dispatcher_lcore_port *port = + &lcore->ports[i]; + + event_count += evd_port_dequeue(dispatcher, lcore, port); + } + + return event_count; +} + +static int32_t +evd_process(void *userdata) +{ + struct rte_dispatcher *dispatcher = userdata; + unsigned int lcore_id = rte_lcore_id(); + struct rte_dispatcher_lcore *lcore = + &dispatcher->lcores[lcore_id]; + uint64_t event_count; + + event_count = evd_lcore_process(dispatcher, lcore); + + if (unlikely(event_count == 0)) + return -EAGAIN; + + return 0; +} + +static int +evd_service_register(struct rte_dispatcher *dispatcher) +{ + struct rte_service_spec service = { + .callback = evd_process, + .callback_userdata = dispatcher, + .capabilities = RTE_SERVICE_CAP_MT_SAFE, + .socket_id = dispatcher->socket_id + }; + int rc; + + snprintf(service.name, sizeof(service.name), EVD_SERVICE_NAME); + + rc = rte_service_component_register(&service, &dispatcher->service_id); + if (rc != 0) + RTE_EDEV_LOG_ERR("Registration of dispatcher service " + "%s failed with error code %d\n", + service.name, rc); + + return rc; +} + +static int +evd_service_unregister(struct rte_dispatcher *dispatcher) +{ + int rc; + + rc = rte_service_component_unregister(dispatcher->service_id); + if (rc != 0) + RTE_EDEV_LOG_ERR("Unregistration of dispatcher service " + "failed with error code %d\n", rc); + + return rc; +} + +struct rte_dispatcher * +rte_dispatcher_create(uint8_t event_dev_id) +{ + int socket_id; + struct rte_dispatcher *dispatcher; + int rc; + + socket_id = rte_event_dev_socket_id(event_dev_id); + + dispatcher = + rte_malloc_socket("dispatcher", sizeof(struct rte_dispatcher), + RTE_CACHE_LINE_SIZE, socket_id); + + if (dispatcher == NULL) { + RTE_EDEV_LOG_ERR("Unable to allocate memory for dispatcher\n"); + rte_errno = ENOMEM; + return NULL; + } + + *dispatcher = (struct rte_dispatcher) { + .event_dev_id = event_dev_id, + .socket_id = socket_id + }; + + rc = evd_service_register(dispatcher); + if (rc < 0) { + rte_free(dispatcher); + rte_errno = -rc; + return NULL; + } + + return dispatcher; +} + +int +rte_dispatcher_free(struct rte_dispatcher *dispatcher) +{ + int rc; + + if (dispatcher == NULL) + return 0; + + rc = evd_service_unregister(dispatcher); + if (rc != 0) + return rc; + + rte_free(dispatcher); + + return 0; +} + +uint32_t +rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher) +{ + return 
dispatcher->service_id; +} + +static int +lcore_port_index(struct rte_dispatcher_lcore *lcore, + uint8_t event_port_id) +{ + uint16_t i; + + for (i = 0; i < lcore->num_ports; i++) { + struct rte_dispatcher_lcore_port *port = + &lcore->ports[i]; + + if (port->port_id == event_port_id) + return i; + } + + return -1; +} + +int +rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher, + uint8_t event_port_id, uint16_t batch_size, uint64_t timeout, + unsigned int lcore_id) +{ + struct rte_dispatcher_lcore *lcore; + struct rte_dispatcher_lcore_port *port; + + lcore = &dispatcher->lcores[lcore_id]; + + if (lcore->num_ports == EVD_MAX_PORTS_PER_LCORE) + return -ENOMEM; + + if (lcore_port_index(lcore, event_port_id) >= 0) + return -EEXIST; + + port = &lcore->ports[lcore->num_ports]; + + *port = (struct rte_dispatcher_lcore_port) { + .port_id = event_port_id, + .batch_size = batch_size, + .timeout = timeout + }; + + lcore->num_ports++; + + return 0; +} + +int +rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher, + uint8_t event_port_id, unsigned int lcore_id) +{ + struct rte_dispatcher_lcore *lcore; + int port_idx; + struct rte_dispatcher_lcore_port *port; + struct rte_dispatcher_lcore_port *last; + + lcore = &dispatcher->lcores[lcore_id]; + + port_idx = lcore_port_index(lcore, event_port_id); + + if (port_idx < 0) + return -ENOENT; + + port = &lcore->ports[port_idx]; + last = &lcore->ports[lcore->num_ports - 1]; + + if (port != last) + *port = *last; + + lcore->num_ports--; + + return 0; +} + +static struct rte_dispatcher_handler * +evd_lcore_get_handler_by_id(struct rte_dispatcher_lcore *lcore, int handler_id) +{ + uint16_t i; + + for (i = 0; i < lcore->num_handlers; i++) { + struct rte_dispatcher_handler *handler = + &lcore->handlers[i]; + + if (handler->id == handler_id) + return handler; + } + + return NULL; +} + +static int +evd_alloc_handler_id(struct rte_dispatcher *dispatcher) +{ + int handler_id = 0; + struct rte_dispatcher_lcore *reference_lcore = + &dispatcher->lcores[0]; + + if (reference_lcore->num_handlers == EVD_MAX_HANDLERS) + return -1; + + while (evd_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL) + handler_id++; + + return handler_id; +} + +static void +evd_lcore_install_handler(struct rte_dispatcher_lcore *lcore, + const struct rte_dispatcher_handler *handler) +{ + int handler_idx = lcore->num_handlers; + + lcore->handlers[handler_idx] = *handler; + lcore->num_handlers++; +} + +static void +evd_install_handler(struct rte_dispatcher *dispatcher, + const struct rte_dispatcher_handler *handler) +{ + int i; + + for (i = 0; i < RTE_MAX_LCORE; i++) { + struct rte_dispatcher_lcore *lcore = + &dispatcher->lcores[i]; + evd_lcore_install_handler(lcore, handler); + } +} + +int +rte_dispatcher_register(struct rte_dispatcher *dispatcher, + rte_dispatcher_match_t match_fun, void *match_data, + rte_dispatcher_process_t process_fun, void *process_data) +{ + struct rte_dispatcher_handler handler = { + .match_fun = match_fun, + .match_data = match_data, + .process_fun = process_fun, + .process_data = process_data + }; + + handler.id = evd_alloc_handler_id(dispatcher); + + if (handler.id < 0) + return -ENOMEM; + + evd_install_handler(dispatcher, &handler); + + return handler.id; +} + +static int +evd_lcore_uninstall_handler(struct rte_dispatcher_lcore *lcore, + int handler_id) +{ + struct rte_dispatcher_handler *unreg_handler; + int handler_idx; + uint16_t last_idx; + + unreg_handler = evd_lcore_get_handler_by_id(lcore, handler_id); + + if 
(unreg_handler == NULL) { + RTE_EDEV_LOG_ERR("Invalid handler id %d\n", handler_id); + return -EINVAL; + } + + handler_idx = unreg_handler - &lcore->handlers[0]; + + last_idx = lcore->num_handlers - 1; + + if (handler_idx != last_idx) { + /* move all handlers to maintain handler order */ + int n = last_idx - handler_idx; + memmove(unreg_handler, unreg_handler + 1, + sizeof(struct rte_dispatcher_handler) * n); + } + + lcore->num_handlers--; + + return 0; +} + +static int +evd_uninstall_handler(struct rte_dispatcher *dispatcher, int handler_id) +{ + unsigned int lcore_id; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + struct rte_dispatcher_lcore *lcore = + &dispatcher->lcores[lcore_id]; + int rc; + + rc = evd_lcore_uninstall_handler(lcore, handler_id); + if (rc < 0) + return rc; + } + + return 0; +} + +int +rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id) +{ + return evd_uninstall_handler(dispatcher, handler_id); +} + +static struct rte_dispatcher_finalizer * +evd_get_finalizer_by_id(struct rte_dispatcher *dispatcher, + int handler_id) +{ + int i; + + for (i = 0; i < dispatcher->num_finalizers; i++) { + struct rte_dispatcher_finalizer *finalizer = + &dispatcher->finalizers[i]; + + if (finalizer->id == handler_id) + return finalizer; + } + + return NULL; +} + +static int +evd_alloc_finalizer_id(struct rte_dispatcher *dispatcher) +{ + int finalizer_id = 0; + + while (evd_get_finalizer_by_id(dispatcher, finalizer_id) != NULL) + finalizer_id++; + + return finalizer_id; +} + +static struct rte_dispatcher_finalizer * +evd_alloc_finalizer(struct rte_dispatcher *dispatcher) +{ + int finalizer_idx; + struct rte_dispatcher_finalizer *finalizer; + + if (dispatcher->num_finalizers == EVD_MAX_FINALIZERS) + return NULL; + + finalizer_idx = dispatcher->num_finalizers; + finalizer = &dispatcher->finalizers[finalizer_idx]; + + finalizer->id = evd_alloc_finalizer_id(dispatcher); + + dispatcher->num_finalizers++; + + return finalizer; +} + +int +rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher, + rte_dispatcher_finalize_t finalize_fun, void *finalize_data) +{ + struct rte_dispatcher_finalizer *finalizer; + + finalizer = evd_alloc_finalizer(dispatcher); + + if (finalizer == NULL) + return -ENOMEM; + + finalizer->finalize_fun = finalize_fun; + finalizer->finalize_data = finalize_data; + + return finalizer->id; +} + +int +rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher, + int finalizer_id) +{ + struct rte_dispatcher_finalizer *unreg_finalizer; + int finalizer_idx; + uint16_t last_idx; + + unreg_finalizer = evd_get_finalizer_by_id(dispatcher, finalizer_id); + + if (unreg_finalizer == NULL) { + RTE_EDEV_LOG_ERR("Invalid finalizer id %d\n", finalizer_id); + return -EINVAL; + } + + finalizer_idx = unreg_finalizer - &dispatcher->finalizers[0]; + + last_idx = dispatcher->num_finalizers - 1; + + if (finalizer_idx != last_idx) { + /* move all finalizers to maintain order */ + int n = last_idx - finalizer_idx; + memmove(unreg_finalizer, unreg_finalizer + 1, + sizeof(struct rte_dispatcher_finalizer) * n); + } + + dispatcher->num_finalizers--; + + return 0; +} + +static void +evd_set_service_runstate(struct rte_dispatcher *dispatcher, int state) +{ + int rc; + + rc = rte_service_component_runstate_set(dispatcher->service_id, + state); + /* + * The only cause of a runstate_set() failure is an invalid + * service id, which in turns means the dispatcher instance's + * state is invalid. 
+ */ + if (rc != 0) + RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting " + "service component run state to %d\n", rc, + state); + + RTE_VERIFY(rc == 0); +} + +void +rte_dispatcher_start(struct rte_dispatcher *dispatcher) +{ + evd_set_service_runstate(dispatcher, 1); +} + +void +rte_dispatcher_stop(struct rte_dispatcher *dispatcher) +{ + evd_set_service_runstate(dispatcher, 0); +} + +static void +evd_aggregate_stats(struct rte_dispatcher_stats *result, + const struct rte_dispatcher_stats *part) +{ + result->poll_count += part->poll_count; + result->ev_batch_count += part->ev_batch_count; + result->ev_dispatch_count += part->ev_dispatch_count; + result->ev_drop_count += part->ev_drop_count; +} + +void +rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher, + struct rte_dispatcher_stats *stats) +{ + unsigned int lcore_id; + + *stats = (struct rte_dispatcher_stats) {}; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + const struct rte_dispatcher_lcore *lcore = + &dispatcher->lcores[lcore_id]; + + evd_aggregate_stats(stats, &lcore->stats); + } +} + +void +rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher) +{ + unsigned int lcore_id; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + struct rte_dispatcher_lcore *lcore = + &dispatcher->lcores[lcore_id]; + + lcore->stats = (struct rte_dispatcher_stats) {}; + } +} diff --git a/lib/dispatcher/rte_dispatcher.h b/lib/dispatcher/rte_dispatcher.h new file mode 100644 index 00000000000..d8182d5f2c0 --- /dev/null +++ b/lib/dispatcher/rte_dispatcher.h @@ -0,0 +1,456 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Ericsson AB + */ + +#ifndef RTE_DISPATCHER_H +#define RTE_DISPATCHER_H + +/** + * @file + * + * RTE Dispatcher + * + * @warning + * @b EXPERIMENTAL: + * All functions in this file may be changed or removed without prior notice. + * + * The purpose of the dispatcher is to help decouple different parts + * of an application (e.g., modules), sharing the same underlying + * event device. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#include +#include + +/** + * Function prototype for match callbacks. + * + * Match callbacks are used by an application to decide how the + * dispatcher distributes events to different parts of the + * application. + * + * The application is not expected to process the event at the point + * of the match call. Such matters should be deferred to the process + * callback invocation. + * + * The match callback may be used as an opportunity to prefetch data. + * + * @param event + * Pointer to event + * + * @param cb_data + * The pointer supplied by the application in + * rte_dispatcher_register(). + * + * @return + * Returns true in case this event should be delivered (via + * the process callback), and false otherwise. + */ +typedef bool (*rte_dispatcher_match_t)(const struct rte_event *event, + void *cb_data); + +/** + * Function prototype for process callbacks. + * + * The process callbacks are used by the dispatcher to deliver + * events for processing. + * + * @param event_dev_id + * The originating event device id. + * + * @param event_port_id + * The originating event port. + * + * @param events + * Pointer to an array of events. + * + * @param num + * The number of events in the @p events array. + * + * @param cb_data + * The pointer supplied by the application in + * rte_dispatcher_register(). 
+ */ +typedef void (*rte_dispatcher_process_t)(uint8_t event_dev_id, + uint8_t event_port_id, struct rte_event *events, uint16_t num, + void *cb_data); + +/** + * Function prototype for finalize callbacks. + * + * The finalize callbacks are used by the dispatcher to notify the + * application it has delivered all events from a particular batch + * dequeued from the event device. + * + * @param event_dev_id + * The originating event device id. + * + * @param event_port_id + * The originating event port. + * + * @param cb_data + * The pointer supplied by the application in + * rte_dispatcher_finalize_register(). + */ +typedef void (*rte_dispatcher_finalize_t)(uint8_t event_dev_id, + uint8_t event_port_id, void *cb_data); + +/** + * Dispatcher statistics + */ +struct rte_dispatcher_stats { + /** Number of event dequeue calls made toward the event device. */ + uint64_t poll_count; + /** Number of non-empty event batches dequeued from event device.*/ + uint64_t ev_batch_count; + /** Number of events dispatched to a handler.*/ + uint64_t ev_dispatch_count; + /** Number of events dropped because no handler was found. */ + uint64_t ev_drop_count; +}; + +/** + * Create a dispatcher with the specified id. + * + * @param event_dev_id + * The identifier of the event device from which this dispatcher + * will dequeue events. + * + * @return + * A pointer to a new dispatcher instance, or NULL on failure, in which + * case rte_errno is set. + */ +__rte_experimental +struct rte_dispatcher * +rte_dispatcher_create(uint8_t event_dev_id); + +/** + * Free a dispatcher. + * + * @param dispatcher + * The dispatcher instance. + * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int +rte_dispatcher_free(struct rte_dispatcher *dispatcher); + +/** + * Retrieve the service identifier of a dispatcher. + * + * @param dispatcher + * The dispatcher instance. + * + * @return + * The dispatcher service's id. + */ +__rte_experimental +uint32_t +rte_dispatcher_service_id_get(const struct rte_dispatcher *dispatcher); + +/** + * Binds an event device port to a specific lcore on the specified + * dispatcher. + * + * This function configures the event port id to be used by the event + * dispatcher service, if run on the specified lcore. + * + * Multiple event device ports may be bound to the same lcore. A + * particular port must not be bound to more than one lcore. + * + * If the dispatcher service is mapped (with rte_service_map_lcore_set()) + * to a lcore to which no ports are bound, the service function will be a + * no-operation. + * + * This function may be called by any thread (including unregistered + * non-EAL threads), but not while the dispatcher is running on lcore + * specified by @c lcore_id. + * + * @param dispatcher + * The dispatcher instance. + * + * @param event_port_id + * The event device port identifier. + * + * @param batch_size + * The batch size to use in rte_event_dequeue_burst(), for the + * configured event device port and lcore. + * + * @param timeout + * The timeout parameter to use in rte_event_dequeue_burst(), for the + * configured event device port and lcore. + * + * @param lcore_id + * The lcore by which this event port will be used. + * + * @return + * - 0: Success + * - -ENOMEM: Unable to allocate sufficient resources. + * - -EEXISTS: Event port is already configured. + * - -EINVAL: Invalid arguments. 
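+ *
+ * Minimal usage sketch (the batch size of 32 and the timeout of 0 are
+ * illustrative values only):
+ * @code
+ * ret = rte_dispatcher_bind_port_to_lcore(dispatcher, event_port_id,
+ *	32, 0, lcore_id);
+ * @endcode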
+ */ +__rte_experimental +int +rte_dispatcher_bind_port_to_lcore(struct rte_dispatcher *dispatcher, + uint8_t event_port_id, uint16_t batch_size, uint64_t timeout, + unsigned int lcore_id); + +/** + * Unbind an event device port from a specific lcore. + * + * This function may be called by any thread (including unregistered + * non-EAL threads), but not while the dispatcher is running on + * lcore specified by @c lcore_id. + * + * @param dispatcher + * The dispatcher instance. + * + * @param event_port_id + * The event device port identifier. + * + * @param lcore_id + * The lcore which was using this event port. + * + * @return + * - 0: Success + * - -ENOENT: Event port id not bound to this @c lcore_id. + */ +__rte_experimental +int +rte_dispatcher_unbind_port_from_lcore(struct rte_dispatcher *dispatcher, + uint8_t event_port_id, unsigned int lcore_id); + +/** + * Register an event handler. + * + * The match callback function is used to select if a particular event + * should be delivered, using the corresponding process callback + * function. + * + * The reason for having two distinct steps is to allow the dispatcher + * to deliver all events as a batch. This in turn will cause + * processing of a particular kind of events to happen in a + * back-to-back manner, improving cache locality. + * + * The list of handler callback functions is shared among all lcores, + * but will only be executed on lcores which has an eventdev port + * bound to them, and which are running the dispatcher service. + * + * An event is delivered to at most one handler. Events where no + * handler is found are dropped. + * + * The application must not depend on the order of which the match + * functions are invoked. + * + * Ordering of events is not guaranteed to be maintained between + * different deliver callbacks. For example, suppose there are two + * callbacks registered, matching different subsets of events arriving + * on an atomic queue. A batch of events [ev0, ev1, ev2] are dequeued + * on a particular port, all pertaining to the same flow. The match + * callback for registration A returns true for ev0 and ev2, and the + * matching function for registration B for ev1. In that scenario, the + * dispatcher may choose to deliver first [ev0, ev2] using A's deliver + * function, and then [ev1] to B - or vice versa. + * + * rte_dispatcher_register() may be called by any thread + * (including unregistered non-EAL threads), but not while the event + * dispatcher is running on any service lcore. + * + * @param dispatcher + * The dispatcher instance. + * + * @param match_fun + * The match callback function. + * + * @param match_cb_data + * A pointer to some application-specific opaque data (or NULL), + * which is supplied back to the application when match_fun is + * called. + * + * @param process_fun + * The process callback function. + * + * @param process_cb_data + * A pointer to some application-specific opaque data (or NULL), + * which is supplied back to the application when process_fun is + * called. + * + * @return + * - >= 0: The identifier for this registration. + * - -ENOMEM: Unable to allocate sufficient resources. + */ +__rte_experimental +int +rte_dispatcher_register(struct rte_dispatcher *dispatcher, + rte_dispatcher_match_t match_fun, void *match_cb_data, + rte_dispatcher_process_t process_fun, void *process_cb_data); + +/** + * Unregister an event handler. 
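+ *
+ * Minimal registration sketch (my_match, my_process, handle_event and
+ * queue_id are illustrative placeholders):
+ * @code
+ * static bool
+ * my_match(const struct rte_event *event, void *cb_data)
+ * {
+ *	uint8_t *queue_id = cb_data;
+ *
+ *	return event->queue_id == *queue_id;
+ * }
+ *
+ * static void
+ * my_process(uint8_t event_dev_id, uint8_t event_port_id,
+ *	struct rte_event *events, uint16_t num, void *cb_data)
+ * {
+ *	uint16_t i;
+ *
+ *	for (i = 0; i < num; i++)
+ *		handle_event(&events[i]);
+ * }
+ *
+ * handler_id = rte_dispatcher_register(dispatcher, my_match, &queue_id,
+ *	my_process, NULL);
+ * @endcode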
+ * + * This function may be called by any thread (including unregistered + * non-EAL threads), but not while the dispatcher is running on + * any service lcore. + * + * @param dispatcher + * The dispatcher instance. + * + * @param handler_id + * The handler registration id returned by the original + * rte_dispatcher_register() call. + * + * @return + * - 0: Success + * - -EINVAL: The @c handler_id parameter was invalid. + */ +__rte_experimental +int +rte_dispatcher_unregister(struct rte_dispatcher *dispatcher, int handler_id); + +/** + * Register a finalize callback function. + * + * An application may optionally install one or more finalize + * callbacks. + * + * All finalize callbacks are invoked by the dispatcher when a + * complete batch of events (retrieve using rte_event_dequeue_burst()) + * have been delivered to the application (or have been dropped). + * + * The finalize callback is not tied to any particular handler. + * + * The finalize callback provides an opportunity for the application + * to do per-batch processing. One case where this may be useful is if + * an event output buffer is used, and is shared among several + * handlers. In such a case, proper output buffer flushing may be + * assured using a finalize callback. + * + * rte_dispatcher_finalize_register() may be called by any thread + * (including unregistered non-EAL threads), but not while the + * dispatcher is running on any service lcore. + * + * @param dispatcher + * The dispatcher instance. + * + * @param finalize_fun + * The function called after completing the processing of a + * dequeue batch. + * + * @param finalize_data + * A pointer to some application-specific opaque data (or NULL), + * which is supplied back to the application when @c finalize_fun is + * called. + * + * @return + * - >= 0: The identifier for this registration. + * - -ENOMEM: Unable to allocate sufficient resources. + */ +__rte_experimental +int +rte_dispatcher_finalize_register(struct rte_dispatcher *dispatcher, + rte_dispatcher_finalize_t finalize_fun, void *finalize_data); + +/** + * Unregister a finalize callback. + * + * This function may be called by any thread (including unregistered + * non-EAL threads), but not while the dispatcher is running on + * any service lcore. + * + * @param dispatcher + * The dispatcher instance. + * + * @param reg_id + * The finalize registration id returned by the original + * rte_dispatcher_finalize_register() call. + * + * @return + * - 0: Success + * - -EINVAL: The @c reg_id parameter was invalid. + */ +__rte_experimental +int +rte_dispatcher_finalize_unregister(struct rte_dispatcher *dispatcher, int reg_id); + +/** + * Start a dispatcher instance. + * + * Enables the dispatcher service. + * + * The underlying event device must have been started prior to calling + * rte_dispatcher_start(). + * + * For the dispatcher to actually perform work (i.e., dispatch + * events), its service must have been mapped to one or more service + * lcores, and its service run state set to '1'. A dispatcher's + * service is retrieved using rte_dispatcher_service_id_get(). + * + * Each service lcore to which the dispatcher is mapped should + * have at least one event port configured. Such configuration is + * performed by calling rte_dispatcher_bind_port_to_lcore(), prior to + * starting the dispatcher. + * + * @param dispatcher + * The dispatcher instance. + */ +__rte_experimental +void +rte_dispatcher_start(struct rte_dispatcher *dispatcher); + +/** + * Stop a running dispatcher instance. 
+ * + * Disables the dispatcher service. + * + * @param dispatcher + * The dispatcher instance. + */ +__rte_experimental +void +rte_dispatcher_stop(struct rte_dispatcher *dispatcher); + +/** + * Retrieve statistics for a dispatcher instance. + * + * This function is MT safe and may be called by any thread + * (including unregistered non-EAL threads). + * + * @param dispatcher + * The dispatcher instance. + * @param[out] stats + * A pointer to a structure to fill with statistics. + */ +__rte_experimental +void +rte_dispatcher_stats_get(const struct rte_dispatcher *dispatcher, + struct rte_dispatcher_stats *stats); + +/** + * Reset statistics for a dispatcher instance. + * + * This function may be called by any thread (including unregistered + * non-EAL threads), but may not produce the correct result if the + * dispatcher is running on any service lcore. + * + * @param dispatcher + * The dispatcher instance. + */ +__rte_experimental +void +rte_dispatcher_stats_reset(struct rte_dispatcher *dispatcher); + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_DISPATCHER_H */ diff --git a/lib/dispatcher/version.map b/lib/dispatcher/version.map new file mode 100644 index 00000000000..44585e4f158 --- /dev/null +++ b/lib/dispatcher/version.map @@ -0,0 +1,20 @@ +EXPERIMENTAL { + global: + + # added in 23.11 + rte_dispatcher_bind_port_to_lcore; + rte_dispatcher_create; + rte_dispatcher_finalize_register; + rte_dispatcher_finalize_unregister; + rte_dispatcher_free; + rte_dispatcher_register; + rte_dispatcher_service_id_get; + rte_dispatcher_start; + rte_dispatcher_stats_get; + rte_dispatcher_stats_reset; + rte_dispatcher_stop; + rte_dispatcher_unbind_port_from_lcore; + rte_dispatcher_unregister; + + local: *; +}; diff --git a/lib/distributor/distributor_private.h b/lib/distributor/distributor_private.h index 7101f632348..2f29343cc16 100644 --- a/lib/distributor/distributor_private.h +++ b/lib/distributor/distributor_private.h @@ -52,7 +52,7 @@ * Only 64-bits of the memory is actually used though. */ union rte_distributor_buffer_single { - volatile int64_t bufptr64; + volatile RTE_ATOMIC(int64_t) bufptr64; char pad[RTE_CACHE_LINE_SIZE*3]; } __rte_cache_aligned; diff --git a/lib/distributor/rte_distributor_single.c b/lib/distributor/rte_distributor_single.c index 2c77ac454a0..ad43c13dd09 100644 --- a/lib/distributor/rte_distributor_single.c +++ b/lib/distributor/rte_distributor_single.c @@ -32,10 +32,10 @@ rte_distributor_request_pkt_single(struct rte_distributor_single *d, int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_GET_BUF; RTE_WAIT_UNTIL_MASKED(&buf->bufptr64, RTE_DISTRIB_FLAGS_MASK, - ==, 0, __ATOMIC_RELAXED); + ==, 0, rte_memory_order_relaxed); /* Sync with distributor on GET_BUF flag. */ - __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE); + rte_atomic_store_explicit(&buf->bufptr64, req, rte_memory_order_release); } struct rte_mbuf * @@ -44,7 +44,7 @@ rte_distributor_poll_pkt_single(struct rte_distributor_single *d, { union rte_distributor_buffer_single *buf = &d->bufs[worker_id]; /* Sync with distributor. Acquire bufptr64. 
*/ - if (__atomic_load_n(&buf->bufptr64, __ATOMIC_ACQUIRE) + if (rte_atomic_load_explicit(&buf->bufptr64, rte_memory_order_acquire) & RTE_DISTRIB_GET_BUF) return NULL; @@ -72,10 +72,10 @@ rte_distributor_return_pkt_single(struct rte_distributor_single *d, uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF; RTE_WAIT_UNTIL_MASKED(&buf->bufptr64, RTE_DISTRIB_FLAGS_MASK, - ==, 0, __ATOMIC_RELAXED); + ==, 0, rte_memory_order_relaxed); /* Sync with distributor on RETURN_BUF flag. */ - __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE); + rte_atomic_store_explicit(&buf->bufptr64, req, rte_memory_order_release); return 0; } @@ -119,7 +119,7 @@ handle_worker_shutdown(struct rte_distributor_single *d, unsigned int wkr) d->in_flight_tags[wkr] = 0; d->in_flight_bitmask &= ~(1UL << wkr); /* Sync with worker. Release bufptr64. */ - __atomic_store_n(&(d->bufs[wkr].bufptr64), 0, __ATOMIC_RELEASE); + rte_atomic_store_explicit(&d->bufs[wkr].bufptr64, 0, rte_memory_order_release); if (unlikely(d->backlog[wkr].count != 0)) { /* On return of a packet, we need to move the * queued packets for this core elsewhere. @@ -165,21 +165,21 @@ process_returns(struct rte_distributor_single *d) for (wkr = 0; wkr < d->num_workers; wkr++) { uintptr_t oldbuf = 0; /* Sync with worker. Acquire bufptr64. */ - const int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64), - __ATOMIC_ACQUIRE); + const int64_t data = rte_atomic_load_explicit(&d->bufs[wkr].bufptr64, + rte_memory_order_acquire); if (data & RTE_DISTRIB_GET_BUF) { flushed++; if (d->backlog[wkr].count) /* Sync with worker. Release bufptr64. */ - __atomic_store_n(&(d->bufs[wkr].bufptr64), + rte_atomic_store_explicit(&d->bufs[wkr].bufptr64, backlog_pop(&d->backlog[wkr]), - __ATOMIC_RELEASE); + rte_memory_order_release); else { /* Sync with worker on GET_BUF flag. */ - __atomic_store_n(&(d->bufs[wkr].bufptr64), + rte_atomic_store_explicit(&d->bufs[wkr].bufptr64, RTE_DISTRIB_GET_BUF, - __ATOMIC_RELEASE); + rte_memory_order_release); d->in_flight_tags[wkr] = 0; d->in_flight_bitmask &= ~(1UL << wkr); } @@ -217,8 +217,8 @@ rte_distributor_process_single(struct rte_distributor_single *d, while (next_idx < num_mbufs || next_mb != NULL) { uintptr_t oldbuf = 0; /* Sync with worker. Acquire bufptr64. */ - int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64), - __ATOMIC_ACQUIRE); + int64_t data = rte_atomic_load_explicit(&(d->bufs[wkr].bufptr64), + rte_memory_order_acquire); if (!next_mb) { next_mb = mbufs[next_idx++]; @@ -264,15 +264,15 @@ rte_distributor_process_single(struct rte_distributor_single *d, if (d->backlog[wkr].count) /* Sync with worker. Release bufptr64. */ - __atomic_store_n(&(d->bufs[wkr].bufptr64), + rte_atomic_store_explicit(&d->bufs[wkr].bufptr64, backlog_pop(&d->backlog[wkr]), - __ATOMIC_RELEASE); + rte_memory_order_release); else { /* Sync with worker. Release bufptr64. */ - __atomic_store_n(&(d->bufs[wkr].bufptr64), + rte_atomic_store_explicit(&d->bufs[wkr].bufptr64, next_value, - __ATOMIC_RELEASE); + rte_memory_order_release); d->in_flight_tags[wkr] = new_tag; d->in_flight_bitmask |= (1UL << wkr); next_mb = NULL; @@ -294,8 +294,8 @@ rte_distributor_process_single(struct rte_distributor_single *d, for (wkr = 0; wkr < d->num_workers; wkr++) if (d->backlog[wkr].count && /* Sync with worker. Acquire bufptr64. 
*/ - (__atomic_load_n(&(d->bufs[wkr].bufptr64), - __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) { + (rte_atomic_load_explicit(&d->bufs[wkr].bufptr64, + rte_memory_order_acquire) & RTE_DISTRIB_GET_BUF)) { int64_t oldbuf = d->bufs[wkr].bufptr64 >> RTE_DISTRIB_FLAG_BITS; @@ -303,9 +303,9 @@ rte_distributor_process_single(struct rte_distributor_single *d, store_return(oldbuf, d, &ret_start, &ret_count); /* Sync with worker. Release bufptr64. */ - __atomic_store_n(&(d->bufs[wkr].bufptr64), + rte_atomic_store_explicit(&d->bufs[wkr].bufptr64, backlog_pop(&d->backlog[wkr]), - __ATOMIC_RELEASE); + rte_memory_order_release); } d->returns.start = ret_start; diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c index 8c095e1f35c..bf7d5ec519b 100644 --- a/lib/dmadev/rte_dmadev.c +++ b/lib/dmadev/rte_dmadev.c @@ -758,7 +758,7 @@ dma_dump_capability(FILE *f, uint64_t dev_capa) (void)fprintf(f, " dev_capa: 0x%" PRIx64 " -", dev_capa); while (dev_capa > 0) { - capa = 1ull << __builtin_ctzll(dev_capa); + capa = 1ull << rte_ctz64(dev_capa); (void)fprintf(f, " %s", dma_capability_name(capa)); dev_capa &= ~capa; } diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h index b157ab7600f..493263a5d62 100644 --- a/lib/dmadev/rte_dmadev.h +++ b/lib/dmadev/rte_dmadev.h @@ -269,6 +269,14 @@ int16_t rte_dma_next_dev(int16_t start_dev_id); * must ensure that all memory addresses are valid and accessible by HW. */ #define RTE_DMA_CAPA_HANDLES_ERRORS RTE_BIT64(6) +/** Support auto free for source buffer once mem to dev transfer is completed. + * + * @note Even though the DMA driver has this capability, it may not support all + * mempool drivers. If the mempool is not supported by the DMA driver, + * rte_dma_vchan_setup() will fail. + */ +#define RTE_DMA_CAPA_M2D_AUTO_FREE RTE_BIT64(7) + /** Support copy operation. * This capability start with index of 32, so that it could leave gap between * normal capability and ops capability. @@ -552,6 +560,26 @@ struct rte_dma_port_param { uint64_t reserved[2]; /**< Reserved for future fields. */ }; +/** + * A structure used for offload auto free params. + */ +struct rte_dma_auto_free_param { + union { + struct { + /** + * Mempool from which buffer is allocated. Mempool info + * is used for freeing buffer by hardware. + * + * @note If the mempool is not supported by the DMA device, + * rte_dma_vchan_setup() will fail. + */ + struct rte_mempool *pool; + } m2d; + }; + /** Reserved for future fields. */ + uint64_t reserved[2]; +}; + /** * A structure used to configure a virtual DMA channel. * @@ -581,6 +609,14 @@ struct rte_dma_vchan_conf { * @see struct rte_dma_port_param */ struct rte_dma_port_param dst_port; + /** Buffer params to auto free buffer by hardware. To free the buffer + * by hardware, RTE_DMA_OP_FLAG_AUTO_FREE must be set while calling + * rte_dma_copy and rte_dma_copy_sg(). + * + * @see RTE_DMA_OP_FLAG_AUTO_FREE + * @see struct rte_dma_auto_free_param + */ + struct rte_dma_auto_free_param auto_free; }; /** @@ -818,6 +854,13 @@ struct rte_dma_sge { * capability bit for this, driver should not return error if this flag was set. */ #define RTE_DMA_OP_FLAG_LLC RTE_BIT64(2) +/** Auto free buffer flag. + * Operation with this flag must issue command to hardware to free the DMA + * buffer after DMA transfer is completed. 
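+ *
+ * Illustrative usage (dev_id, vchan, src, dst and len are placeholders;
+ * the virtual channel is assumed to have been set up with a valid
+ * auto_free.m2d.pool):
+ * @code
+ * ret = rte_dma_copy(dev_id, vchan, src, dst, len,
+ *	RTE_DMA_OP_FLAG_AUTO_FREE);
+ * @endcode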
+ * + * @see struct rte_dma_vchan_conf::auto_free + */ +#define RTE_DMA_OP_FLAG_AUTO_FREE RTE_BIT64(3) /**@}*/ /** diff --git a/lib/eal/arm/include/rte_atomic_32.h b/lib/eal/arm/include/rte_atomic_32.h index c00ab78dba1..62fc33773d6 100644 --- a/lib/eal/arm/include/rte_atomic_32.h +++ b/lib/eal/arm/include/rte_atomic_32.h @@ -34,9 +34,9 @@ extern "C" { #define rte_io_rmb() rte_rmb() static __rte_always_inline void -rte_atomic_thread_fence(int memorder) +rte_atomic_thread_fence(rte_memory_order memorder) { - __atomic_thread_fence(memorder); + __rte_atomic_thread_fence(memorder); } #ifdef __cplusplus diff --git a/lib/eal/arm/include/rte_atomic_64.h b/lib/eal/arm/include/rte_atomic_64.h index 60479115076..75d8ba6092c 100644 --- a/lib/eal/arm/include/rte_atomic_64.h +++ b/lib/eal/arm/include/rte_atomic_64.h @@ -38,9 +38,9 @@ extern "C" { #define rte_io_rmb() rte_rmb() static __rte_always_inline void -rte_atomic_thread_fence(int memorder) +rte_atomic_thread_fence(rte_memory_order memorder) { - __atomic_thread_fence(memorder); + __rte_atomic_thread_fence(memorder); } /*------------------------ 128 bit atomic operations -------------------------*/ @@ -107,33 +107,33 @@ rte_atomic128_cmp_exchange(rte_int128_t *dst, rte_int128_t *exp, */ RTE_SET_USED(failure); /* Find invalid memory order */ - RTE_ASSERT(success == __ATOMIC_RELAXED || - success == __ATOMIC_ACQUIRE || - success == __ATOMIC_RELEASE || - success == __ATOMIC_ACQ_REL || - success == __ATOMIC_SEQ_CST); + RTE_ASSERT(success == rte_memory_order_relaxed || + success == rte_memory_order_acquire || + success == rte_memory_order_release || + success == rte_memory_order_acq_rel || + success == rte_memory_order_seq_cst); rte_int128_t expected = *exp; rte_int128_t desired = *src; rte_int128_t old; #if defined(__ARM_FEATURE_ATOMICS) || defined(RTE_ARM_FEATURE_ATOMICS) - if (success == __ATOMIC_RELAXED) + if (success == rte_memory_order_relaxed) __cas_128_relaxed(dst, exp, desired); - else if (success == __ATOMIC_ACQUIRE) + else if (success == rte_memory_order_acquire) __cas_128_acquire(dst, exp, desired); - else if (success == __ATOMIC_RELEASE) + else if (success == rte_memory_order_release) __cas_128_release(dst, exp, desired); else __cas_128_acq_rel(dst, exp, desired); old = *exp; #else -#define __HAS_ACQ(mo) ((mo) != __ATOMIC_RELAXED && (mo) != __ATOMIC_RELEASE) -#define __HAS_RLS(mo) ((mo) == __ATOMIC_RELEASE || (mo) == __ATOMIC_ACQ_REL || \ - (mo) == __ATOMIC_SEQ_CST) +#define __HAS_ACQ(mo) ((mo) != rte_memory_order_relaxed && (mo) != rte_memory_order_release) +#define __HAS_RLS(mo) ((mo) == rte_memory_order_release || (mo) == rte_memory_order_acq_rel || \ + (mo) == rte_memory_order_seq_cst) - int ldx_mo = __HAS_ACQ(success) ? __ATOMIC_ACQUIRE : __ATOMIC_RELAXED; - int stx_mo = __HAS_RLS(success) ? __ATOMIC_RELEASE : __ATOMIC_RELAXED; + int ldx_mo = __HAS_ACQ(success) ? rte_memory_order_acquire : rte_memory_order_relaxed; + int stx_mo = __HAS_RLS(success) ? 
rte_memory_order_release : rte_memory_order_relaxed; #undef __HAS_ACQ #undef __HAS_RLS @@ -153,7 +153,7 @@ rte_atomic128_cmp_exchange(rte_int128_t *dst, rte_int128_t *exp, : "Q" (src->val[0]) \ : "memory"); } - if (ldx_mo == __ATOMIC_RELAXED) + if (ldx_mo == rte_memory_order_relaxed) __LOAD_128("ldxp", dst, old) else __LOAD_128("ldaxp", dst, old) @@ -170,7 +170,7 @@ rte_atomic128_cmp_exchange(rte_int128_t *dst, rte_int128_t *exp, : "memory"); } if (likely(old.int128 == expected.int128)) { - if (stx_mo == __ATOMIC_RELAXED) + if (stx_mo == rte_memory_order_relaxed) __STORE_128("stxp", dst, desired, ret) else __STORE_128("stlxp", dst, desired, ret) @@ -181,7 +181,7 @@ rte_atomic128_cmp_exchange(rte_int128_t *dst, rte_int128_t *exp, * needs to be stored back to ensure it was read * atomically. */ - if (stx_mo == __ATOMIC_RELAXED) + if (stx_mo == rte_memory_order_relaxed) __STORE_128("stxp", dst, old, ret) else __STORE_128("stlxp", dst, old, ret) diff --git a/lib/eal/arm/include/rte_byteorder.h b/lib/eal/arm/include/rte_byteorder.h index df2f1d87ba8..ff02052f2e8 100644 --- a/lib/eal/arm/include/rte_byteorder.h +++ b/lib/eal/arm/include/rte_byteorder.h @@ -17,25 +17,6 @@ extern "C" { #include #include "generic/rte_byteorder.h" -/* fix missing __builtin_bswap16 for gcc older then 4.8 */ -#if !(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) - -static inline uint16_t rte_arch_bswap16(uint16_t _x) -{ - uint16_t x = _x; - - asm volatile ("rev16 %w0,%w1" - : "=r" (x) - : "r" (x) - ); - return x; -} - -#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ? \ - rte_constant_bswap16(x) : \ - rte_arch_bswap16(x))) -#endif - /* ARM architecture is bi-endian (both big and little). */ #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN diff --git a/lib/eal/arm/include/rte_cpuflags_32.h b/lib/eal/arm/include/rte_cpuflags_32.h index 4e254428a2a..770b09b99d9 100644 --- a/lib/eal/arm/include/rte_cpuflags_32.h +++ b/lib/eal/arm/include/rte_cpuflags_32.h @@ -42,8 +42,6 @@ enum rte_cpu_flag_t { RTE_CPUFLAG_CRC32, RTE_CPUFLAG_V7L, RTE_CPUFLAG_V8L, - /* The last item */ - RTE_CPUFLAG_NUMFLAGS,/**< This should always be the last! */ }; #include "generic/rte_cpuflags.h" diff --git a/lib/eal/arm/include/rte_cpuflags_64.h b/lib/eal/arm/include/rte_cpuflags_64.h index aa7a56d4915..afe70209c38 100644 --- a/lib/eal/arm/include/rte_cpuflags_64.h +++ b/lib/eal/arm/include/rte_cpuflags_64.h @@ -36,8 +36,6 @@ enum rte_cpu_flag_t { RTE_CPUFLAG_SVEF64MM, RTE_CPUFLAG_SVEBF16, RTE_CPUFLAG_AARCH64, - /* The last item */ - RTE_CPUFLAG_NUMFLAGS,/**< This should always be the last! */ }; #include "generic/rte_cpuflags.h" diff --git a/lib/eal/arm/include/rte_pause_64.h b/lib/eal/arm/include/rte_pause_64.h index 5f70e974815..5cb8b59056e 100644 --- a/lib/eal/arm/include/rte_pause_64.h +++ b/lib/eal/arm/include/rte_pause_64.h @@ -41,7 +41,7 @@ static inline void rte_pause(void) * implicitly to exit WFE. */ #define __RTE_ARM_LOAD_EXC_8(src, dst, memorder) { \ - if (memorder == __ATOMIC_RELAXED) { \ + if (memorder == rte_memory_order_relaxed) { \ asm volatile("ldxrb %w[tmp], [%x[addr]]" \ : [tmp] "=&r" (dst) \ : [addr] "r" (src) \ @@ -60,7 +60,7 @@ static inline void rte_pause(void) * implicitly to exit WFE. */ #define __RTE_ARM_LOAD_EXC_16(src, dst, memorder) { \ - if (memorder == __ATOMIC_RELAXED) { \ + if (memorder == rte_memory_order_relaxed) { \ asm volatile("ldxrh %w[tmp], [%x[addr]]" \ : [tmp] "=&r" (dst) \ : [addr] "r" (src) \ @@ -79,7 +79,7 @@ static inline void rte_pause(void) * implicitly to exit WFE. 
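/*
 * Editor's note (illustration, not part of the patch): the conversion pattern
 * applied throughout this series, replacing GCC __atomic builtins and
 * __ATOMIC_* constants with the rte_stdatomic.h wrappers and rte_memory_order
 * values. The counter variable and functions are hypothetical.
 */
#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint64_t) counter;

static inline void
producer_update(uint64_t v)
{
	/* Was: __atomic_store_n(&counter, v, __ATOMIC_RELEASE); */
	rte_atomic_store_explicit(&counter, v, rte_memory_order_release);
}

static inline uint64_t
consumer_read(void)
{
	/* Was: __atomic_load_n(&counter, __ATOMIC_ACQUIRE); */
	return rte_atomic_load_explicit(&counter, rte_memory_order_acquire);
}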
*/ #define __RTE_ARM_LOAD_EXC_32(src, dst, memorder) { \ - if (memorder == __ATOMIC_RELAXED) { \ + if (memorder == rte_memory_order_relaxed) { \ asm volatile("ldxr %w[tmp], [%x[addr]]" \ : [tmp] "=&r" (dst) \ : [addr] "r" (src) \ @@ -98,7 +98,7 @@ static inline void rte_pause(void) * implicitly to exit WFE. */ #define __RTE_ARM_LOAD_EXC_64(src, dst, memorder) { \ - if (memorder == __ATOMIC_RELAXED) { \ + if (memorder == rte_memory_order_relaxed) { \ asm volatile("ldxr %x[tmp], [%x[addr]]" \ : [tmp] "=&r" (dst) \ : [addr] "r" (src) \ @@ -118,7 +118,7 @@ static inline void rte_pause(void) */ #define __RTE_ARM_LOAD_EXC_128(src, dst, memorder) { \ volatile rte_int128_t *dst_128 = (volatile rte_int128_t *)&dst; \ - if (memorder == __ATOMIC_RELAXED) { \ + if (memorder == rte_memory_order_relaxed) { \ asm volatile("ldxp %x[tmp0], %x[tmp1], [%x[addr]]" \ : [tmp0] "=&r" (dst_128->val[0]), \ [tmp1] "=&r" (dst_128->val[1]) \ @@ -153,8 +153,8 @@ rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected, { uint16_t value; - RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE && - memorder != __ATOMIC_RELAXED); + RTE_BUILD_BUG_ON(memorder != rte_memory_order_acquire && + memorder != rte_memory_order_relaxed); __RTE_ARM_LOAD_EXC_16(addr, value, memorder) if (value != expected) { @@ -172,8 +172,8 @@ rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, { uint32_t value; - RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE && - memorder != __ATOMIC_RELAXED); + RTE_BUILD_BUG_ON(memorder != rte_memory_order_acquire && + memorder != rte_memory_order_relaxed); __RTE_ARM_LOAD_EXC_32(addr, value, memorder) if (value != expected) { @@ -191,8 +191,8 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected, { uint64_t value; - RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE && - memorder != __ATOMIC_RELAXED); + RTE_BUILD_BUG_ON(memorder != rte_memory_order_acquire && + memorder != rte_memory_order_relaxed); __RTE_ARM_LOAD_EXC_64(addr, value, memorder) if (value != expected) { @@ -206,8 +206,8 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected, #define RTE_WAIT_UNTIL_MASKED(addr, mask, cond, expected, memorder) do { \ RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder)); \ - RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE && \ - memorder != __ATOMIC_RELAXED); \ + RTE_BUILD_BUG_ON(memorder != rte_memory_order_acquire && \ + memorder != rte_memory_order_relaxed); \ const uint32_t size = sizeof(*(addr)) << 3; \ typeof(*(addr)) expected_value = (expected); \ typeof(*(addr)) value; \ diff --git a/lib/eal/arm/include/rte_vect.h b/lib/eal/arm/include/rte_vect.h index 4b705bac5f6..8cfe4bddc12 100644 --- a/lib/eal/arm/include/rte_vect.h +++ b/lib/eal/arm/include/rte_vect.h @@ -83,18 +83,6 @@ vcopyq_laneq_u32(uint32x4_t a, const int lane_a, #if defined(RTE_ARCH_ARM64) #if RTE_CC_IS_GNU && (GCC_VERSION < 70000) -#if (GCC_VERSION < 40900) -typedef uint64_t poly64_t; -typedef uint64x2_t poly64x2_t; -typedef uint8_t poly128_t __attribute__((vector_size(16), aligned(16))); - -static inline uint32x4_t -vceqzq_u32(uint32x4_t a) -{ - return (a == 0); -} -#endif - /* NEON intrinsic vreinterpretq_u64_p128() is supported since GCC version 7 */ static inline uint64x2_t vreinterpretq_u64_p128(poly128_t x) diff --git a/lib/eal/arm/rte_cpuflags.c b/lib/eal/arm/rte_cpuflags.c index 56e7b2e6890..7ba4f8ba973 100644 --- a/lib/eal/arm/rte_cpuflags.c +++ b/lib/eal/arm/rte_cpuflags.c @@ -140,7 +140,7 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature) const struct feature_entry *feat; hwcap_registers_t 
regs = {0}; - if (feature >= RTE_CPUFLAG_NUMFLAGS) + if ((unsigned int)feature >= RTE_DIM(rte_cpu_feature_table)) return -ENOENT; feat = &rte_cpu_feature_table[feature]; @@ -154,7 +154,7 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature) const char * rte_cpu_get_flag_name(enum rte_cpu_flag_t feature) { - if (feature >= RTE_CPUFLAG_NUMFLAGS) + if ((unsigned int)feature >= RTE_DIM(rte_cpu_feature_table)) return NULL; return rte_cpu_feature_table[feature].name; } diff --git a/lib/eal/arm/rte_power_intrinsics.c b/lib/eal/arm/rte_power_intrinsics.c index 77b96e4aa34..f54cf59e808 100644 --- a/lib/eal/arm/rte_power_intrinsics.c +++ b/lib/eal/arm/rte_power_intrinsics.c @@ -33,19 +33,19 @@ rte_power_monitor(const struct rte_power_monitor_cond *pmc, switch (pmc->size) { case sizeof(uint8_t): - __RTE_ARM_LOAD_EXC_8(pmc->addr, cur_value, __ATOMIC_RELAXED) + __RTE_ARM_LOAD_EXC_8(pmc->addr, cur_value, rte_memory_order_relaxed) __RTE_ARM_WFE() break; case sizeof(uint16_t): - __RTE_ARM_LOAD_EXC_16(pmc->addr, cur_value, __ATOMIC_RELAXED) + __RTE_ARM_LOAD_EXC_16(pmc->addr, cur_value, rte_memory_order_relaxed) __RTE_ARM_WFE() break; case sizeof(uint32_t): - __RTE_ARM_LOAD_EXC_32(pmc->addr, cur_value, __ATOMIC_RELAXED) + __RTE_ARM_LOAD_EXC_32(pmc->addr, cur_value, rte_memory_order_relaxed) __RTE_ARM_WFE() break; case sizeof(uint64_t): - __RTE_ARM_LOAD_EXC_64(pmc->addr, cur_value, __ATOMIC_RELAXED) + __RTE_ARM_LOAD_EXC_64(pmc->addr, cur_value, rte_memory_order_relaxed) __RTE_ARM_WFE() break; default: diff --git a/lib/eal/common/eal_common_dynmem.c b/lib/eal/common/eal_common_dynmem.c index bdbbe233a0e..95da55d9b08 100644 --- a/lib/eal/common/eal_common_dynmem.c +++ b/lib/eal/common/eal_common_dynmem.c @@ -251,7 +251,10 @@ eal_dynmem_hugepage_init(void) */ memset(&dummy, 0, sizeof(dummy)); dummy.hugepage_sz = hpi->hugepage_sz; - if (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0) + /* memory_hotplug_lock is held during initialization, so it's + * safe to call thread-unsafe version. + */ + if (rte_memseg_list_walk_thread_unsafe(hugepage_count_walk, &dummy) < 0) return -1; for (i = 0; i < RTE_DIM(dummy.num_pages); i++) { diff --git a/lib/eal/common/eal_common_fbarray.c b/lib/eal/common/eal_common_fbarray.c index 169e66e04b3..2055bfa57dd 100644 --- a/lib/eal/common/eal_common_fbarray.c +++ b/lib/eal/common/eal_common_fbarray.c @@ -189,7 +189,7 @@ find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, tmp_msk &= tmp_msk >> 1ULL; /* we found what we were looking for */ if (tmp_msk != 0) { - run_start = __builtin_ctzll(tmp_msk); + run_start = rte_ctz64(tmp_msk); return MASK_GET_IDX(msk_idx, run_start); } } @@ -203,7 +203,7 @@ find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, if (~cur_msk == 0) clz = sizeof(cur_msk) * 8; else - clz = __builtin_clzll(~cur_msk); + clz = rte_clz64(~cur_msk); /* if there aren't any runs at the end either, just continue */ if (clz == 0) @@ -308,7 +308,7 @@ find_next(const struct rte_fbarray *arr, unsigned int start, bool used) * find first set bit - that will correspond to whatever it is * that we're looking for. */ - found = __builtin_ctzll(cur); + found = rte_ctz64(cur); return MASK_GET_IDX(idx, found); } /* we didn't find anything */ @@ -366,7 +366,7 @@ find_contig(const struct rte_fbarray *arr, unsigned int start, bool used) /* * see if current run ends before mask end. 
*/ - run_len = __builtin_ctzll(cur); + run_len = rte_ctz64(cur); /* add however many zeroes we've had in the last run and quit */ if (run_len < need_len) { @@ -454,7 +454,7 @@ find_prev_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, * would have been. */ run_start = MASK_ALIGN - - __builtin_clzll(tmp_msk) - n; + rte_clz64(tmp_msk) - n; return MASK_GET_IDX(msk_idx, run_start); } } @@ -468,7 +468,7 @@ find_prev_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, if (~cur_msk == 0) ctz = sizeof(cur_msk) * 8; else - ctz = __builtin_ctzll(~cur_msk); + ctz = rte_ctz64(~cur_msk); /* if there aren't any runs at the start either, just * continue @@ -584,7 +584,7 @@ find_prev(const struct rte_fbarray *arr, unsigned int start, bool used) * the value we get is counted from end of mask, so calculate * position from start of mask. */ - found = MASK_ALIGN - __builtin_clzll(cur) - 1; + found = MASK_ALIGN - rte_clz64(cur) - 1; return MASK_GET_IDX(idx, found); } while (idx-- != 0); /* decrement after check to include zero*/ @@ -635,7 +635,7 @@ find_rev_contig(const struct rte_fbarray *arr, unsigned int start, bool used) /* * see where run ends, starting from the end. */ - run_len = __builtin_clzll(cur); + run_len = rte_clz64(cur); /* add however many zeroes we've had in the last run and quit */ if (run_len < need_len) { diff --git a/lib/eal/common/eal_common_hexdump.c b/lib/eal/common/eal_common_hexdump.c index 63bbbdcf0ab..6fd6e21e1ea 100644 --- a/lib/eal/common/eal_common_hexdump.c +++ b/lib/eal/common/eal_common_hexdump.c @@ -15,7 +15,7 @@ rte_hexdump(FILE *f, const char *title, const void *buf, unsigned int len) char line[LINE_LEN]; /* space needed 8+16*3+3+16 == 75 */ fprintf(f, "%s at [%p], len=%u\n", - title ? : " Dump data", data, len); + title != NULL ? 
title : " Dump data", data, len); ofs = 0; while (ofs < len) { /* format the line in the buffer */ diff --git a/lib/eal/common/eal_common_mcfg.c b/lib/eal/common/eal_common_mcfg.c index b60d41f7b69..2a785e74c4f 100644 --- a/lib/eal/common/eal_common_mcfg.c +++ b/lib/eal/common/eal_common_mcfg.c @@ -177,6 +177,12 @@ rte_mcfg_timer_unlock(void) rte_spinlock_unlock(rte_mcfg_timer_get_lock()); } +rte_spinlock_t * +rte_mcfg_ethdev_get_lock(void) +{ + return &rte_eal_get_configuration()->mem_config->ethdev_lock; +} + bool rte_mcfg_get_single_file_segments(void) { diff --git a/lib/eal/common/eal_common_options.c b/lib/eal/common/eal_common_options.c index d74b581567d..a6d21f1cba8 100644 --- a/lib/eal/common/eal_common_options.c +++ b/lib/eal/common/eal_common_options.c @@ -5,6 +5,7 @@ #include #include +#include #ifndef RTE_EXEC_ENV_WINDOWS #include #endif diff --git a/lib/eal/common/eal_common_proc.c b/lib/eal/common/eal_common_proc.c index c4981f22a2e..f20a348ede5 100644 --- a/lib/eal/common/eal_common_proc.c +++ b/lib/eal/common/eal_common_proc.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "eal_memcfg.h" #include "eal_private.h" @@ -33,7 +34,7 @@ #include "eal_internal_cfg.h" static int mp_fd = -1; -static pthread_t mp_handle_tid; +static rte_thread_t mp_handle_tid; static char mp_filter[PATH_MAX]; /* Filter for secondary process sockets */ static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */ static pthread_mutex_t mp_mutex_action = PTHREAD_MUTEX_INITIALIZER; @@ -396,7 +397,7 @@ process_msg(struct mp_msg_internal *m, struct sockaddr_un *s) } } -static void * +static uint32_t mp_handle(void *arg __rte_unused) { struct mp_msg_internal msg; @@ -413,7 +414,7 @@ mp_handle(void *arg __rte_unused) process_msg(&msg, &sa); } - return NULL; + return 0; } static int @@ -646,8 +647,8 @@ rte_mp_channel_init(void) return -1; } - if (rte_ctrl_thread_create(&mp_handle_tid, "rte_mp_handle", - NULL, mp_handle, NULL) < 0) { + if (rte_thread_create_internal_control(&mp_handle_tid, "mp-msg", + mp_handle, NULL) < 0) { RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n", strerror(errno)); close(dir_fd); @@ -671,8 +672,8 @@ rte_mp_channel_cleanup(void) if (fd < 0) return; - pthread_cancel(mp_handle_tid); - pthread_join(mp_handle_tid, NULL); + pthread_cancel((pthread_t)mp_handle_tid.opaque_id); + rte_thread_join(mp_handle_tid, NULL); close_socket_fd(fd); } diff --git a/lib/eal/common/eal_common_thread.c b/lib/eal/common/eal_common_thread.c index 6605bd017e7..668b1ed96bb 100644 --- a/lib/eal/common/eal_common_thread.c +++ b/lib/eal/common/eal_common_thread.c @@ -235,25 +235,22 @@ enum __rte_ctrl_thread_status { CTRL_THREAD_ERROR /* Control thread encountered an error */ }; -struct rte_thread_ctrl_params { - union { - void *(*ctrl_start_routine)(void *arg); - rte_thread_func control_start_routine; - } u; +struct control_thread_params { + rte_thread_func start_routine; void *arg; int ret; /* Control thread status. * If the status is CTRL_THREAD_ERROR, 'ret' has the error code. 
*/ - enum __rte_ctrl_thread_status ctrl_thread_status; + enum __rte_ctrl_thread_status status; }; -static int ctrl_thread_init(void *arg) +static int control_thread_init(void *arg) { struct internal_config *internal_conf = eal_get_internal_configuration(); rte_cpuset_t *cpuset = &internal_conf->ctrl_cpuset; - struct rte_thread_ctrl_params *params = arg; + struct control_thread_params *params = arg; __rte_thread_init(rte_lcore_id(), cpuset); /* Set control thread socket ID to SOCKET_ID_ANY @@ -262,135 +259,105 @@ static int ctrl_thread_init(void *arg) RTE_PER_LCORE(_socket_id) = SOCKET_ID_ANY; params->ret = rte_thread_set_affinity_by_id(rte_thread_self(), cpuset); if (params->ret != 0) { - __atomic_store_n(¶ms->ctrl_thread_status, + __atomic_store_n(¶ms->status, CTRL_THREAD_ERROR, __ATOMIC_RELEASE); return 1; } - __atomic_store_n(¶ms->ctrl_thread_status, + __atomic_store_n(¶ms->status, CTRL_THREAD_RUNNING, __ATOMIC_RELEASE); return 0; } -static void *ctrl_thread_start(void *arg) -{ - struct rte_thread_ctrl_params *params = arg; - void *start_arg = params->arg; - void *(*start_routine)(void *) = params->u.ctrl_start_routine; - - if (ctrl_thread_init(arg) != 0) - return NULL; - - return start_routine(start_arg); -} - static uint32_t control_thread_start(void *arg) { - struct rte_thread_ctrl_params *params = arg; + struct control_thread_params *params = arg; void *start_arg = params->arg; - rte_thread_func start_routine = params->u.control_start_routine; + rte_thread_func start_routine = params->start_routine; - if (ctrl_thread_init(arg) != 0) + if (control_thread_init(arg) != 0) return 0; return start_routine(start_arg); } int -rte_ctrl_thread_create(pthread_t *thread, const char *name, - const pthread_attr_t *attr, - void *(*start_routine)(void *), void *arg) +rte_thread_create_control(rte_thread_t *thread, const char *name, + rte_thread_func start_routine, void *arg) { - struct rte_thread_ctrl_params *params; + struct control_thread_params *params; enum __rte_ctrl_thread_status ctrl_thread_status; int ret; params = malloc(sizeof(*params)); - if (!params) + if (params == NULL) return -ENOMEM; - params->u.ctrl_start_routine = start_routine; + params->start_routine = start_routine; params->arg = arg; params->ret = 0; - params->ctrl_thread_status = CTRL_THREAD_LAUNCHING; + params->status = CTRL_THREAD_LAUNCHING; - ret = pthread_create(thread, attr, ctrl_thread_start, (void *)params); + ret = rte_thread_create(thread, NULL, control_thread_start, params); if (ret != 0) { free(params); return -ret; } if (name != NULL) - rte_thread_set_name((rte_thread_t){(uintptr_t)*thread}, name); + rte_thread_set_name(*thread, name); /* Wait for the control thread to initialize successfully */ while ((ctrl_thread_status = - __atomic_load_n(¶ms->ctrl_thread_status, + __atomic_load_n(¶ms->status, __ATOMIC_ACQUIRE)) == CTRL_THREAD_LAUNCHING) { - /* Yield the CPU. Using sched_yield call requires maintaining - * another implementation for Windows as sched_yield is not - * supported on Windows. 
- */ rte_delay_us_sleep(1); } /* Check if the control thread encountered an error */ if (ctrl_thread_status == CTRL_THREAD_ERROR) { /* ctrl thread is exiting */ - rte_thread_join((rte_thread_t){(uintptr_t)*thread}, NULL); + rte_thread_join(*thread, NULL); } ret = params->ret; free(params); - return -ret; + return ret; } -int -rte_thread_create_control(rte_thread_t *thread, const char *name, - const rte_thread_attr_t *attr, rte_thread_func start_routine, - void *arg) +static void +add_internal_prefix(char *prefixed_name, const char *name, size_t size) { - struct rte_thread_ctrl_params *params; - enum __rte_ctrl_thread_status ctrl_thread_status; - int ret; + size_t prefixlen; - params = malloc(sizeof(*params)); - if (params == NULL) - return -ENOMEM; - - params->u.control_start_routine = start_routine; - params->arg = arg; - params->ret = 0; - params->ctrl_thread_status = CTRL_THREAD_LAUNCHING; + /* Check RTE_THREAD_INTERNAL_NAME_SIZE definition. */ + RTE_BUILD_BUG_ON(RTE_THREAD_INTERNAL_NAME_SIZE != + RTE_THREAD_NAME_SIZE - sizeof(RTE_THREAD_INTERNAL_PREFIX) + 1); - ret = rte_thread_create(thread, attr, control_thread_start, params); - if (ret != 0) { - free(params); - return -ret; - } - - if (name != NULL) - rte_thread_set_name(*thread, name); + prefixlen = strlen(RTE_THREAD_INTERNAL_PREFIX); + strlcpy(prefixed_name, RTE_THREAD_INTERNAL_PREFIX, size); + strlcpy(prefixed_name + prefixlen, name, size - prefixlen); +} - /* Wait for the control thread to initialize successfully */ - while ((ctrl_thread_status = - __atomic_load_n(¶ms->ctrl_thread_status, - __ATOMIC_ACQUIRE)) == CTRL_THREAD_LAUNCHING) { - rte_delay_us_sleep(1); - } +int +rte_thread_create_internal_control(rte_thread_t *id, const char *name, + rte_thread_func func, void *arg) +{ + char prefixed_name[RTE_THREAD_NAME_SIZE]; - /* Check if the control thread encountered an error */ - if (ctrl_thread_status == CTRL_THREAD_ERROR) { - /* ctrl thread is exiting */ - rte_thread_join(*thread, NULL); - } + add_internal_prefix(prefixed_name, name, sizeof(prefixed_name)); + return rte_thread_create_control(id, prefixed_name, func, arg); +} - ret = params->ret; - free(params); +void +rte_thread_set_prefixed_name(rte_thread_t id, const char *name) +{ + char prefixed_name[RTE_THREAD_NAME_SIZE]; - return ret; + add_internal_prefix(prefixed_name, name, sizeof(prefixed_name)); + rte_thread_set_name(id, prefixed_name); } int diff --git a/lib/eal/common/eal_common_trace.c b/lib/eal/common/eal_common_trace.c index cb980af06d9..d2eac2db0db 100644 --- a/lib/eal/common/eal_common_trace.c +++ b/lib/eal/common/eal_common_trace.c @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -103,11 +104,11 @@ static void trace_mode_set(rte_trace_point_t *t, enum rte_trace_mode mode) { if (mode == RTE_TRACE_MODE_OVERWRITE) - __atomic_fetch_and(t, ~__RTE_TRACE_FIELD_ENABLE_DISCARD, - __ATOMIC_RELEASE); + rte_atomic_fetch_and_explicit(t, ~__RTE_TRACE_FIELD_ENABLE_DISCARD, + rte_memory_order_release); else - __atomic_fetch_or(t, __RTE_TRACE_FIELD_ENABLE_DISCARD, - __ATOMIC_RELEASE); + rte_atomic_fetch_or_explicit(t, __RTE_TRACE_FIELD_ENABLE_DISCARD, + rte_memory_order_release); } void @@ -141,7 +142,7 @@ rte_trace_point_is_enabled(rte_trace_point_t *t) if (trace_point_is_invalid(t)) return false; - val = __atomic_load_n(t, __ATOMIC_ACQUIRE); + val = rte_atomic_load_explicit(t, rte_memory_order_acquire); return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0; } @@ -153,7 +154,8 @@ rte_trace_point_enable(rte_trace_point_t *t) if (trace_point_is_invalid(t)) return 
-ERANGE; - prev = __atomic_fetch_or(t, __RTE_TRACE_FIELD_ENABLE_MASK, __ATOMIC_RELEASE); + prev = rte_atomic_fetch_or_explicit(t, __RTE_TRACE_FIELD_ENABLE_MASK, + rte_memory_order_release); if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) == 0) __atomic_fetch_add(&trace.status, 1, __ATOMIC_RELEASE); return 0; @@ -167,7 +169,8 @@ rte_trace_point_disable(rte_trace_point_t *t) if (trace_point_is_invalid(t)) return -ERANGE; - prev = __atomic_fetch_and(t, ~__RTE_TRACE_FIELD_ENABLE_MASK, __ATOMIC_RELEASE); + prev = rte_atomic_fetch_and_explicit(t, ~__RTE_TRACE_FIELD_ENABLE_MASK, + rte_memory_order_release); if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) != 0) __atomic_fetch_sub(&trace.status, 1, __ATOMIC_RELEASE); return 0; diff --git a/lib/eal/common/eal_memalloc.h b/lib/eal/common/eal_memalloc.h index ebc3a6f6c1f..286ffb76335 100644 --- a/lib/eal/common/eal_memalloc.h +++ b/lib/eal/common/eal_memalloc.h @@ -91,7 +91,8 @@ int eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset); int -eal_memalloc_init(void); +eal_memalloc_init(void) + __rte_shared_locks_required(rte_mcfg_mem_get_lock()); int eal_memalloc_cleanup(void); diff --git a/lib/eal/common/eal_memcfg.h b/lib/eal/common/eal_memcfg.h index 8889ba063fe..d5c63e2f4d1 100644 --- a/lib/eal/common/eal_memcfg.h +++ b/lib/eal/common/eal_memcfg.h @@ -37,6 +37,7 @@ struct rte_mem_config { rte_rwlock_t qlock; /**< used by tailqs for thread safety. */ rte_rwlock_t mplock; /**< used by mempool library for thread safety. */ rte_spinlock_t tlock; /**< used by timer library for thread safety. */ + rte_spinlock_t ethdev_lock; /**< used by ethdev library. */ rte_rwlock_t memory_hotplug_lock; /**< Indicates whether memory hotplug request is in progress. */ diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h index 5eadba49024..ebd496b537c 100644 --- a/lib/eal/common/eal_private.h +++ b/lib/eal/common/eal_private.h @@ -115,7 +115,8 @@ int rte_eal_memseg_init(void); * @return * 0 on success, negative on error */ -int rte_eal_memory_init(void); +int rte_eal_memory_init(void) + __rte_shared_locks_required(rte_mcfg_mem_get_lock()); /** * Configure timers diff --git a/lib/eal/common/malloc_elem.c b/lib/eal/common/malloc_elem.c index 619c040aa3e..f5d1c8c2e24 100644 --- a/lib/eal/common/malloc_elem.c +++ b/lib/eal/common/malloc_elem.c @@ -386,7 +386,7 @@ malloc_elem_free_list_index(size_t size) return 0; /* Find next power of 2 > size. */ - log2 = sizeof(size) * 8 - __builtin_clzl(size); + log2 = sizeof(size) * 8 - rte_clz64(size); /* Compute freelist index, based on log2(size). 
*/ index = (log2 - MALLOC_MINSIZE_LOG2 + MALLOC_LOG2_INCREMENT - 1) / diff --git a/lib/eal/common/malloc_mp.c b/lib/eal/common/malloc_mp.c index 7270c2ec90f..4d62397aba0 100644 --- a/lib/eal/common/malloc_mp.c +++ b/lib/eal/common/malloc_mp.c @@ -4,6 +4,7 @@ #include #include +#include #include #include diff --git a/lib/eal/common/rte_malloc.c b/lib/eal/common/rte_malloc.c index ebafef3f6cf..9db0c399aef 100644 --- a/lib/eal/common/rte_malloc.c +++ b/lib/eal/common/rte_malloc.c @@ -41,13 +41,13 @@ mem_free(void *addr, const bool trace_ena) void rte_free(void *addr) { - return mem_free(addr, true); + mem_free(addr, true); } void eal_free_no_trace(void *addr) { - return mem_free(addr, false); + mem_free(addr, false); } static void * diff --git a/lib/eal/common/rte_random.c b/lib/eal/common/rte_random.c index 565f2401ce1..7709b8f2c69 100644 --- a/lib/eal/common/rte_random.c +++ b/lib/eal/common/rte_random.c @@ -7,6 +7,7 @@ #endif #include +#include #include #include #include @@ -18,6 +19,7 @@ struct rte_rand_state { uint64_t z3; uint64_t z4; uint64_t z5; + RTE_CACHE_GUARD; } __rte_cache_aligned; /* One instance each for every lcore id-equipped thread, and one @@ -83,7 +85,7 @@ rte_srand(uint64_t seed) unsigned int lcore_id; /* add lcore_id to seed to avoid having the same sequence */ - for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) + for (lcore_id = 0; lcore_id < RTE_DIM(rand_states); lcore_id++) __rte_srand_lfsr258(seed + lcore_id, &rand_states[lcore_id]); } @@ -153,7 +155,7 @@ rte_rand_max(uint64_t upper_bound) state = __rte_rand_get_state(); - ones = __builtin_popcountll(upper_bound); + ones = rte_popcount64(upper_bound); /* Handle power-of-2 upper_bound as a special case, since it * has no bias issues. @@ -168,7 +170,7 @@ rte_rand_max(uint64_t upper_bound) * the value and generate a new one. */ - leading_zeros = __builtin_clzll(upper_bound); + leading_zeros = rte_clz64(upper_bound); mask >>= leading_zeros; do { diff --git a/lib/eal/common/rte_reciprocal.c b/lib/eal/common/rte_reciprocal.c index 42dfa44eb49..87c67e6d49d 100644 --- a/lib/eal/common/rte_reciprocal.c +++ b/lib/eal/common/rte_reciprocal.c @@ -8,6 +8,7 @@ #include #include +#include #include "rte_reciprocal.h" @@ -54,7 +55,7 @@ divide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) } /* Count leading zeros. 
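/*
 * Editor's note (illustration, not part of the patch): RTE_CACHE_GUARD, as
 * added to rte_rand_state above, pads a per-lcore structure so neighbouring
 * array elements written by different cores do not share a cache line. The
 * statistics struct below is hypothetical.
 */
#include <rte_common.h>

struct per_lcore_stats {
	uint64_t packets;
	uint64_t bytes;
	RTE_CACHE_GUARD; /* keep the next array element on its own cache line(s) */
} __rte_cache_aligned;

static struct per_lcore_stats stats[RTE_MAX_LCORE];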
*/ - s = __builtin_clzll(v); + s = rte_clz64(v); if (s > 0) { v = v << s; un64 = (u1 << s) | ((u0 >> (64 - s)) & (-s >> 31)); @@ -106,7 +107,7 @@ rte_reciprocal_value_u64(uint64_t d) uint64_t r; int l; - l = 63 - __builtin_clzll(d); + l = 63 - rte_clz64(d); m = divide_128_div_64_to_64((1ULL << l), 0, d, &r) << 1; if (r << 1 < r || r << 1 >= d) diff --git a/lib/eal/common/rte_service.c b/lib/eal/common/rte_service.c index 94e872a08ab..098a821cd10 100644 --- a/lib/eal/common/rte_service.c +++ b/lib/eal/common/rte_service.c @@ -505,8 +505,8 @@ service_runner_func(void *arg) if (service_mask == 0) continue; - start_id = __builtin_ctzl(service_mask); - end_id = 64 - __builtin_clzl(service_mask); + start_id = rte_ctz64(service_mask); + end_id = 64 - rte_clz64(service_mask); for (i = start_id; i < end_id; i++) { /* return value ignored as no change to code flow */ @@ -586,7 +586,7 @@ rte_service_lcore_count_services(uint32_t lcore) if (!cs->is_service_core) return -ENOTSUP; - return __builtin_popcountll(cs->service_mask); + return rte_popcount64(cs->service_mask); } int32_t diff --git a/lib/eal/freebsd/eal.c b/lib/eal/freebsd/eal.c index 0175d89e4b9..39a28685238 100644 --- a/lib/eal/freebsd/eal.c +++ b/lib/eal/freebsd/eal.c @@ -583,7 +583,7 @@ rte_eal_init(int argc, char **argv) static uint32_t run_once; uint32_t has_run = 0; char cpuset[RTE_CPU_AFFINITY_STR_LEN]; - char thread_name[RTE_MAX_THREAD_NAME_LEN]; + char thread_name[RTE_THREAD_NAME_SIZE]; const struct rte_config *config = rte_eal_get_configuration(); struct internal_config *internal_conf = eal_get_internal_configuration(); @@ -843,7 +843,7 @@ rte_eal_init(int argc, char **argv) /* Set thread_name for aid in debugging. */ snprintf(thread_name, sizeof(thread_name), - "rte-worker-%d", i); + "dpdk-worker%d", i); rte_thread_set_name(lcore_config[i].thread_id, thread_name); ret = rte_thread_set_affinity_by_id(lcore_config[i].thread_id, diff --git a/lib/eal/freebsd/eal_alarm.c b/lib/eal/freebsd/eal_alarm.c index acdcf538042..e5b0909a450 100644 --- a/lib/eal/freebsd/eal_alarm.c +++ b/lib/eal/freebsd/eal_alarm.c @@ -4,6 +4,7 @@ #include #include +#include #include #include #include diff --git a/lib/eal/freebsd/eal_interrupts.c b/lib/eal/freebsd/eal_interrupts.c index ffcf97ad4c5..2b31dfb0996 100644 --- a/lib/eal/freebsd/eal_interrupts.c +++ b/lib/eal/freebsd/eal_interrupts.c @@ -52,7 +52,7 @@ static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER; static struct rte_intr_source_list intr_sources; /* interrupt handling thread */ -static pthread_t intr_thread; +static rte_thread_t intr_thread; static volatile int kq = -1; @@ -591,7 +591,7 @@ eal_intr_process_interrupts(struct kevent *events, int nfds) } } -static void * +static uint32_t eal_intr_thread_main(void *arg __rte_unused) { struct kevent events[MAX_INTR_EVENTS]; @@ -619,7 +619,7 @@ eal_intr_thread_main(void *arg __rte_unused) } close(kq); kq = -1; - return NULL; + return 0; } int @@ -637,7 +637,7 @@ rte_eal_intr_init(void) } /* create the host thread to wait/handle the interrupt */ - ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL, + ret = rte_thread_create_internal_control(&intr_thread, "intr", eal_intr_thread_main, NULL); if (ret != 0) { rte_errno = -ret; @@ -746,5 +746,5 @@ rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle) int rte_thread_is_intr(void) { - return pthread_equal(intr_thread, pthread_self()); + return rte_thread_equal(intr_thread, rte_thread_self()); } diff --git a/lib/eal/freebsd/eal_thread.c b/lib/eal/freebsd/eal_thread.c index 
3227d9b8a2a..6f97a3c2c19 100644 --- a/lib/eal/freebsd/eal_thread.c +++ b/lib/eal/freebsd/eal_thread.c @@ -34,7 +34,7 @@ int rte_sys_gettid(void) void rte_thread_set_name(rte_thread_t thread_id, const char *thread_name) { - char truncated[RTE_MAX_THREAD_NAME_LEN]; + char truncated[RTE_THREAD_NAME_SIZE]; const size_t truncatedsz = sizeof(truncated); if (strlcpy(truncated, thread_name, truncatedsz) >= truncatedsz) @@ -42,10 +42,3 @@ void rte_thread_set_name(rte_thread_t thread_id, const char *thread_name) pthread_set_name_np((pthread_t)thread_id.opaque_id, truncated); } - -int rte_thread_setname(pthread_t id, const char *name) -{ - /* this BSD function returns no error */ - pthread_set_name_np(id, name); - return 0; -} diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h index 82b9bfc5881..db6797e379f 100644 --- a/lib/eal/include/generic/rte_atomic.h +++ b/lib/eal/include/generic/rte_atomic.h @@ -13,8 +13,10 @@ */ #include + #include #include +#include #ifdef __DOXYGEN__ @@ -62,7 +64,7 @@ static inline void rte_rmb(void); * but has different syntax and memory ordering semantic. Hence * deprecated for the simplicity of memory ordering semantics in use. * - * rte_atomic_thread_fence(__ATOMIC_ACQ_REL) should be used instead. + * rte_atomic_thread_fence(rte_memory_order_acq_rel) should be used instead. */ static inline void rte_smp_mb(void); @@ -79,7 +81,7 @@ static inline void rte_smp_mb(void); * but has different syntax and memory ordering semantic. Hence * deprecated for the simplicity of memory ordering semantics in use. * - * rte_atomic_thread_fence(__ATOMIC_RELEASE) should be used instead. + * rte_atomic_thread_fence(rte_memory_order_release) should be used instead. * The fence also guarantees LOAD operations that precede the call * are globally visible across the lcores before the STORE operations * that follows it. @@ -99,7 +101,7 @@ static inline void rte_smp_wmb(void); * but has different syntax and memory ordering semantic. Hence * deprecated for the simplicity of memory ordering semantics in use. * - * rte_atomic_thread_fence(__ATOMIC_ACQUIRE) should be used instead. + * rte_atomic_thread_fence(rte_memory_order_acquire) should be used instead. * The fence also guarantees LOAD operations that precede the call * are globally visible across the lcores before the STORE operations * that follows it. @@ -146,17 +148,23 @@ static inline void rte_io_rmb(void); * Guarantees that operation reordering does not occur at compile time * for operations directly before and after the barrier. */ +#ifdef RTE_TOOLCHAIN_MSVC +#define rte_compiler_barrier() _ReadWriteBarrier() +#else #define rte_compiler_barrier() do { \ asm volatile ("" : : : "memory"); \ } while(0) +#endif /** * Synchronization fence between threads based on the specified memory order. */ -static inline void rte_atomic_thread_fence(int memorder); +static inline void rte_atomic_thread_fence(rte_memory_order memorder); /*------------------------- 16 bit atomic operations -------------------------*/ +#ifndef RTE_TOOLCHAIN_MSVC + /** * Atomic compare and set. 
* @@ -206,7 +214,7 @@ rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val); static inline uint16_t rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val) { - return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); + return rte_atomic_exchange_explicit(dst, val, rte_memory_order_seq_cst); } #endif @@ -273,7 +281,8 @@ rte_atomic16_set(rte_atomic16_t *v, int16_t new_value) static inline void rte_atomic16_add(rte_atomic16_t *v, int16_t inc) { - __atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST); + rte_atomic_fetch_add_explicit((volatile __rte_atomic int16_t *)&v->cnt, inc, + rte_memory_order_seq_cst); } /** @@ -287,7 +296,8 @@ rte_atomic16_add(rte_atomic16_t *v, int16_t inc) static inline void rte_atomic16_sub(rte_atomic16_t *v, int16_t dec) { - __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST); + rte_atomic_fetch_sub_explicit((volatile __rte_atomic int16_t *)&v->cnt, dec, + rte_memory_order_seq_cst); } /** @@ -340,7 +350,8 @@ rte_atomic16_dec(rte_atomic16_t *v) static inline int16_t rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc) { - return __atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST) + inc; + return rte_atomic_fetch_add_explicit((volatile __rte_atomic int16_t *)&v->cnt, inc, + rte_memory_order_seq_cst) + inc; } /** @@ -360,7 +371,8 @@ rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc) static inline int16_t rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec) { - return __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST) - dec; + return rte_atomic_fetch_sub_explicit((volatile __rte_atomic int16_t *)&v->cnt, dec, + rte_memory_order_seq_cst) - dec; } /** @@ -379,7 +391,8 @@ static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v); #ifdef RTE_FORCE_INTRINSICS static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v) { - return __atomic_fetch_add(&v->cnt, 1, __ATOMIC_SEQ_CST) + 1 == 0; + return rte_atomic_fetch_add_explicit((volatile __rte_atomic int16_t *)&v->cnt, 1, + rte_memory_order_seq_cst) + 1 == 0; } #endif @@ -399,7 +412,8 @@ static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v); #ifdef RTE_FORCE_INTRINSICS static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v) { - return __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_SEQ_CST) - 1 == 0; + return rte_atomic_fetch_sub_explicit((volatile __rte_atomic int16_t *)&v->cnt, 1, + rte_memory_order_seq_cst) - 1 == 0; } #endif @@ -485,7 +499,7 @@ rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val); static inline uint32_t rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val) { - return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); + return rte_atomic_exchange_explicit(dst, val, rte_memory_order_seq_cst); } #endif @@ -552,7 +566,8 @@ rte_atomic32_set(rte_atomic32_t *v, int32_t new_value) static inline void rte_atomic32_add(rte_atomic32_t *v, int32_t inc) { - __atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST); + rte_atomic_fetch_add_explicit((volatile __rte_atomic int32_t *)&v->cnt, inc, + rte_memory_order_seq_cst); } /** @@ -566,7 +581,8 @@ rte_atomic32_add(rte_atomic32_t *v, int32_t inc) static inline void rte_atomic32_sub(rte_atomic32_t *v, int32_t dec) { - __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST); + rte_atomic_fetch_sub_explicit((volatile __rte_atomic int32_t *)&v->cnt, dec, + rte_memory_order_seq_cst); } /** @@ -619,7 +635,8 @@ rte_atomic32_dec(rte_atomic32_t *v) static inline int32_t rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc) { - return __atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST) + inc; + return rte_atomic_fetch_add_explicit((volatile __rte_atomic 
int32_t *)&v->cnt, inc, + rte_memory_order_seq_cst) + inc; } /** @@ -639,7 +656,8 @@ rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc) static inline int32_t rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec) { - return __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST) - dec; + return rte_atomic_fetch_sub_explicit((volatile __rte_atomic int32_t *)&v->cnt, dec, + rte_memory_order_seq_cst) - dec; } /** @@ -658,7 +676,8 @@ static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v); #ifdef RTE_FORCE_INTRINSICS static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v) { - return __atomic_fetch_add(&v->cnt, 1, __ATOMIC_SEQ_CST) + 1 == 0; + return rte_atomic_fetch_add_explicit((volatile __rte_atomic int32_t *)&v->cnt, 1, + rte_memory_order_seq_cst) + 1 == 0; } #endif @@ -678,7 +697,8 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v); #ifdef RTE_FORCE_INTRINSICS static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v) { - return __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_SEQ_CST) - 1 == 0; + return rte_atomic_fetch_sub_explicit((volatile __rte_atomic int32_t *)&v->cnt, 1, + rte_memory_order_seq_cst) - 1 == 0; } #endif @@ -763,7 +783,7 @@ rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val); static inline uint64_t rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val) { - return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); + return rte_atomic_exchange_explicit(dst, val, rte_memory_order_seq_cst); } #endif @@ -884,7 +904,8 @@ rte_atomic64_add(rte_atomic64_t *v, int64_t inc); static inline void rte_atomic64_add(rte_atomic64_t *v, int64_t inc) { - __atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST); + rte_atomic_fetch_add_explicit((volatile __rte_atomic int64_t *)&v->cnt, inc, + rte_memory_order_seq_cst); } #endif @@ -903,7 +924,8 @@ rte_atomic64_sub(rte_atomic64_t *v, int64_t dec); static inline void rte_atomic64_sub(rte_atomic64_t *v, int64_t dec) { - __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST); + rte_atomic_fetch_sub_explicit((volatile __rte_atomic int64_t *)&v->cnt, dec, + rte_memory_order_seq_cst); } #endif @@ -961,7 +983,8 @@ rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc); static inline int64_t rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc) { - return __atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST) + inc; + return rte_atomic_fetch_add_explicit((volatile __rte_atomic int64_t *)&v->cnt, inc, + rte_memory_order_seq_cst) + inc; } #endif @@ -985,7 +1008,8 @@ rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec); static inline int64_t rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec) { - return __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST) - dec; + return rte_atomic_fetch_sub_explicit((volatile __rte_atomic int64_t *)&v->cnt, dec, + rte_memory_order_seq_cst) - dec; } #endif @@ -1064,6 +1088,8 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v) } #endif +#endif + /*------------------------ 128 bit atomic operations -------------------------*/ /** @@ -1073,7 +1099,9 @@ typedef struct { union { uint64_t val[2]; #ifdef RTE_ARCH_64 +#ifndef RTE_TOOLCHAIN_MSVC __extension__ __int128 int128; +#endif #endif }; } __rte_aligned(16) rte_int128_t; @@ -1114,8 +1142,8 @@ typedef struct { * stronger) model. * @param failure * If unsuccessful, the operation's memory behavior conforms to this (or a - * stronger) model. This argument cannot be __ATOMIC_RELEASE, - * __ATOMIC_ACQ_REL, or a stronger model than success. + * stronger) model. 
This argument cannot be rte_memory_order_release, + * rte_memory_order_acq_rel, or a stronger model than success. * @return * Non-zero on success; 0 on failure. */ diff --git a/lib/eal/include/generic/rte_byteorder.h b/lib/eal/include/generic/rte_byteorder.h index a67e1d70d90..f1c04ba83ea 100644 --- a/lib/eal/include/generic/rte_byteorder.h +++ b/lib/eal/include/generic/rte_byteorder.h @@ -45,6 +45,8 @@ #define RTE_BYTE_ORDER RTE_BIG_ENDIAN #elif defined __LITTLE_ENDIAN__ #define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN +#elif defined RTE_TOOLCHAIN_MSVC +#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN #endif #if !defined(RTE_BYTE_ORDER) #error Unknown endianness. @@ -234,14 +236,19 @@ static uint64_t rte_be_to_cpu_64(rte_be64_t x); #endif /* __DOXYGEN__ */ #ifdef RTE_FORCE_INTRINSICS -#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) +#ifndef RTE_TOOLCHAIN_MSVC #define rte_bswap16(x) __builtin_bswap16(x) -#endif #define rte_bswap32(x) __builtin_bswap32(x) #define rte_bswap64(x) __builtin_bswap64(x) +#else +#define rte_bswap16(x) _byteswap_ushort(x) +#define rte_bswap32(x) _byteswap_ulong(x) + +#define rte_bswap64(x) _byteswap_uint64(x) +#endif #endif #endif /* _RTE_BYTEORDER_H_ */ diff --git a/lib/eal/include/generic/rte_pause.h b/lib/eal/include/generic/rte_pause.h index ec1f41819cf..f2a1eadcbdb 100644 --- a/lib/eal/include/generic/rte_pause.h +++ b/lib/eal/include/generic/rte_pause.h @@ -14,8 +14,10 @@ #include #include + #include #include +#include /** * Pause CPU execution for a short while @@ -35,13 +37,11 @@ static inline void rte_pause(void); * A 16-bit expected value to be in the memory location. * @param memorder * Two different memory orders that can be specified: - * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to - * C++11 memory orders with the same names, see the C++11 standard or - * the GCC wiki on atomic synchronization for detailed definition. + * rte_memory_order_acquire and rte_memory_order_relaxed. */ static __rte_always_inline void rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected, - int memorder); + rte_memory_order memorder); /** * Wait for *addr to be updated with a 32-bit expected value, with a relaxed @@ -53,13 +53,11 @@ rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected, * A 32-bit expected value to be in the memory location. * @param memorder * Two different memory orders that can be specified: - * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to - * C++11 memory orders with the same names, see the C++11 standard or - * the GCC wiki on atomic synchronization for detailed definition. + * rte_memory_order_acquire and rte_memory_order_relaxed. */ static __rte_always_inline void rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, - int memorder); + rte_memory_order memorder); /** * Wait for *addr to be updated with a 64-bit expected value, with a relaxed @@ -71,42 +69,43 @@ rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, * A 64-bit expected value to be in the memory location. * @param memorder * Two different memory orders that can be specified: - * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to - * C++11 memory orders with the same names, see the C++11 standard or - * the GCC wiki on atomic synchronization for detailed definition. + * rte_memory_order_acquire and rte_memory_order_relaxed. 
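/*
 * Editor's note (illustration, not part of the patch): waiting on a flag with
 * the rte_memory_order based variant documented above. Only
 * rte_memory_order_acquire and rte_memory_order_relaxed are accepted. The
 * flag and function are hypothetical.
 */
#include <rte_pause.h>

static volatile uint32_t start_flag;

static void
wait_for_start(void)
{
	/* Spins with rte_pause() (or WFE on arm64) until another thread
	 * stores 1; acquire order makes that thread's earlier stores visible. */
	rte_wait_until_equal_32(&start_flag, 1, rte_memory_order_acquire);
}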
*/ static __rte_always_inline void rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected, - int memorder); + rte_memory_order memorder); #ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED static __rte_always_inline void rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected, - int memorder) + rte_memory_order memorder) { - assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED); + assert(memorder == rte_memory_order_acquire || memorder == rte_memory_order_relaxed); - while (__atomic_load_n(addr, memorder) != expected) + while (rte_atomic_load_explicit((volatile __rte_atomic uint16_t *)addr, memorder) + != expected) rte_pause(); } static __rte_always_inline void rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, - int memorder) + rte_memory_order memorder) { - assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED); + assert(memorder == rte_memory_order_acquire || memorder == rte_memory_order_relaxed); - while (__atomic_load_n(addr, memorder) != expected) + while (rte_atomic_load_explicit((volatile __rte_atomic uint32_t *)addr, memorder) + != expected) rte_pause(); } static __rte_always_inline void rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected, - int memorder) + rte_memory_order memorder) { - assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED); + assert(memorder == rte_memory_order_acquire || memorder == rte_memory_order_relaxed); - while (__atomic_load_n(addr, memorder) != expected) + while (rte_atomic_load_explicit((volatile __rte_atomic uint64_t *)addr, memorder) + != expected) rte_pause(); } @@ -124,17 +123,15 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected, * An expected value to be in the memory location. * @param memorder * Two different memory orders that can be specified: - * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to - * C++11 memory orders with the same names, see the C++11 standard or - * the GCC wiki on atomic synchronization for detailed definition. + * rte_memory_order_acquire and rte_memory_order_relaxed. */ #define RTE_WAIT_UNTIL_MASKED(addr, mask, cond, expected, memorder) do { \ RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder)); \ - RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE && \ - memorder != __ATOMIC_RELAXED); \ + RTE_BUILD_BUG_ON((memorder) != rte_memory_order_acquire && \ + (memorder) != rte_memory_order_relaxed); \ typeof(*(addr)) expected_value = (expected); \ - while (!((__atomic_load_n((addr), (memorder)) & (mask)) cond \ - expected_value)) \ + while (!((rte_atomic_load_explicit((addr), (memorder)) & (mask)) \ + cond expected_value)) \ rte_pause(); \ } while (0) #endif /* ! RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED */ diff --git a/lib/eal/include/generic/rte_rwlock.h b/lib/eal/include/generic/rte_rwlock.h index 9e083bbc61b..5f939be98c0 100644 --- a/lib/eal/include/generic/rte_rwlock.h +++ b/lib/eal/include/generic/rte_rwlock.h @@ -32,6 +32,7 @@ extern "C" { #include #include #include +#include /** * The rte_rwlock_t type. @@ -57,7 +58,7 @@ extern "C" { #define RTE_RWLOCK_READ 0x4 /* Reader increment */ typedef struct __rte_lockable { - int32_t cnt; + RTE_ATOMIC(int32_t) cnt; } rte_rwlock_t; /** @@ -80,6 +81,10 @@ rte_rwlock_init(rte_rwlock_t *rwl) /** * Take a read lock. Loop until the lock is held. * + * @note The RW lock isn't recursive, so calling this function on the same + * lock twice without releasing it could potentially result in a deadlock + * scenario when a write lock is involved. + * * @param rwl * A pointer to a rwlock structure. 
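/*
 * Editor's note (illustration, not part of the patch): typical reader/writer
 * usage of the rwlock above. Per the new note, the lock is not recursive:
 * taking the read lock twice in the same thread can deadlock if a writer
 * arrives in between. The protected table is hypothetical.
 */
#include <rte_rwlock.h>

static rte_rwlock_t tbl_lock = RTE_RWLOCK_INITIALIZER;
static int tbl[64];

static int
reader_lookup(unsigned int idx)
{
	int v;

	rte_rwlock_read_lock(&tbl_lock);
	v = tbl[idx & 63];
	rte_rwlock_read_unlock(&tbl_lock);
	return v;
}

static void
writer_update(unsigned int idx, int v)
{
	rte_rwlock_write_lock(&tbl_lock);
	tbl[idx & 63] = v;
	rte_rwlock_write_unlock(&tbl_lock);
}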
*/ @@ -92,21 +97,21 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl) while (1) { /* Wait while writer is present or pending */ - while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) + while (rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed) & RTE_RWLOCK_MASK) rte_pause(); /* Try to get read lock */ - x = __atomic_fetch_add(&rwl->cnt, RTE_RWLOCK_READ, - __ATOMIC_ACQUIRE) + RTE_RWLOCK_READ; + x = rte_atomic_fetch_add_explicit(&rwl->cnt, RTE_RWLOCK_READ, + rte_memory_order_acquire) + RTE_RWLOCK_READ; /* If no writer, then acquire was successful */ if (likely(!(x & RTE_RWLOCK_MASK))) return; /* Lost race with writer, backout the change. */ - __atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ, - __ATOMIC_RELAXED); + rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ, + rte_memory_order_relaxed); } } @@ -127,20 +132,20 @@ rte_rwlock_read_trylock(rte_rwlock_t *rwl) { int32_t x; - x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); + x = rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed); /* fail if write lock is held or writer is pending */ if (x & RTE_RWLOCK_MASK) return -EBUSY; /* Try to get read lock */ - x = __atomic_fetch_add(&rwl->cnt, RTE_RWLOCK_READ, - __ATOMIC_ACQUIRE) + RTE_RWLOCK_READ; + x = rte_atomic_fetch_add_explicit(&rwl->cnt, RTE_RWLOCK_READ, + rte_memory_order_acquire) + RTE_RWLOCK_READ; /* Back out if writer raced in */ if (unlikely(x & RTE_RWLOCK_MASK)) { - __atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ, - __ATOMIC_RELEASE); + rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ, + rte_memory_order_release); return -EBUSY; } @@ -158,7 +163,7 @@ rte_rwlock_read_unlock(rte_rwlock_t *rwl) __rte_unlock_function(rwl) __rte_no_thread_safety_analysis { - __atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ, __ATOMIC_RELEASE); + rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ, rte_memory_order_release); } /** @@ -178,10 +183,10 @@ rte_rwlock_write_trylock(rte_rwlock_t *rwl) { int32_t x; - x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); + x = rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed); if (x < RTE_RWLOCK_WRITE && - __atomic_compare_exchange_n(&rwl->cnt, &x, x + RTE_RWLOCK_WRITE, - 1, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) + rte_atomic_compare_exchange_weak_explicit(&rwl->cnt, &x, x + RTE_RWLOCK_WRITE, + rte_memory_order_acquire, rte_memory_order_relaxed)) return 0; else return -EBUSY; @@ -201,22 +206,25 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl) int32_t x; while (1) { - x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); + x = rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed); /* No readers or writers? 
*/ if (likely(x < RTE_RWLOCK_WRITE)) { /* Turn off RTE_RWLOCK_WAIT, turn on RTE_RWLOCK_WRITE */ - if (__atomic_compare_exchange_n(&rwl->cnt, &x, RTE_RWLOCK_WRITE, 1, - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) + if (rte_atomic_compare_exchange_weak_explicit(&rwl->cnt, &x, + RTE_RWLOCK_WRITE, rte_memory_order_acquire, + rte_memory_order_relaxed)) return; } /* Turn on writer wait bit */ if (!(x & RTE_RWLOCK_WAIT)) - __atomic_fetch_or(&rwl->cnt, RTE_RWLOCK_WAIT, __ATOMIC_RELAXED); + rte_atomic_fetch_or_explicit(&rwl->cnt, RTE_RWLOCK_WAIT, + rte_memory_order_relaxed); /* Wait until no readers before trying again */ - while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) > RTE_RWLOCK_WAIT) + while (rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed) + > RTE_RWLOCK_WAIT) rte_pause(); } @@ -233,7 +241,7 @@ rte_rwlock_write_unlock(rte_rwlock_t *rwl) __rte_unlock_function(rwl) __rte_no_thread_safety_analysis { - __atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_WRITE, __ATOMIC_RELEASE); + rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_WRITE, rte_memory_order_release); } /** @@ -247,7 +255,7 @@ rte_rwlock_write_unlock(rte_rwlock_t *rwl) static inline int rte_rwlock_write_is_locked(rte_rwlock_t *rwl) { - if (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) & RTE_RWLOCK_WRITE) + if (rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed) & RTE_RWLOCK_WRITE) return 1; return 0; diff --git a/lib/eal/include/generic/rte_spinlock.h b/lib/eal/include/generic/rte_spinlock.h index c50ebaaa80f..23fb04896f2 100644 --- a/lib/eal/include/generic/rte_spinlock.h +++ b/lib/eal/include/generic/rte_spinlock.h @@ -23,12 +23,13 @@ #endif #include #include +#include /** * The rte_spinlock_t type. */ typedef struct __rte_lockable { - volatile int locked; /**< lock status 0 = unlocked, 1 = locked */ + volatile RTE_ATOMIC(int) locked; /**< lock status 0 = unlocked, 1 = locked */ } rte_spinlock_t; /** @@ -65,10 +66,10 @@ rte_spinlock_lock(rte_spinlock_t *sl) { int exp = 0; - while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0, - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) { - rte_wait_until_equal_32((volatile uint32_t *)&sl->locked, - 0, __ATOMIC_RELAXED); + while (!rte_atomic_compare_exchange_strong_explicit(&sl->locked, &exp, 1, + rte_memory_order_acquire, rte_memory_order_relaxed)) { + rte_wait_until_equal_32((volatile uint32_t *)(uintptr_t)&sl->locked, + 0, rte_memory_order_relaxed); exp = 0; } } @@ -89,7 +90,7 @@ static inline void rte_spinlock_unlock(rte_spinlock_t *sl) __rte_no_thread_safety_analysis { - __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE); + rte_atomic_store_explicit(&sl->locked, 0, rte_memory_order_release); } #endif @@ -112,9 +113,8 @@ rte_spinlock_trylock(rte_spinlock_t *sl) __rte_no_thread_safety_analysis { int exp = 0; - return __atomic_compare_exchange_n(&sl->locked, &exp, 1, - 0, /* disallow spurious failure */ - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); + return rte_atomic_compare_exchange_strong_explicit(&sl->locked, &exp, 1, + rte_memory_order_acquire, rte_memory_order_relaxed); } #endif @@ -128,7 +128,7 @@ rte_spinlock_trylock(rte_spinlock_t *sl) */ static inline int rte_spinlock_is_locked (rte_spinlock_t *sl) { - return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE); + return rte_atomic_load_explicit(&sl->locked, rte_memory_order_acquire); } /** diff --git a/lib/eal/include/generic/rte_vect.h b/lib/eal/include/generic/rte_vect.h index 3fec2bf1a2e..777510cc3df 100644 --- a/lib/eal/include/generic/rte_vect.h +++ b/lib/eal/include/generic/rte_vect.h @@ -17,6 +17,8 @@ #include +#ifndef 
RTE_TOOLCHAIN_MSVC + /* Unsigned vector types */ /** @@ -186,6 +188,8 @@ typedef int32_t rte_v256s32_t __attribute__((vector_size(32), aligned(32))); */ typedef int64_t rte_v256s64_t __attribute__((vector_size(32), aligned(32))); +#endif + /** * The max SIMD bitwidth value to limit vector path selection. */ diff --git a/lib/eal/include/meson.build b/lib/eal/include/meson.build index a0463efac78..e94b056d469 100644 --- a/lib/eal/include/meson.build +++ b/lib/eal/include/meson.build @@ -42,6 +42,7 @@ headers += files( 'rte_seqlock.h', 'rte_service.h', 'rte_service_component.h', + 'rte_stdatomic.h', 'rte_string_fns.h', 'rte_tailq.h', 'rte_thread.h', diff --git a/lib/eal/include/rte_bitops.h b/lib/eal/include/rte_bitops.h index f50dbe43880..174d25216df 100644 --- a/lib/eal/include/rte_bitops.h +++ b/lib/eal/include/rte_bitops.h @@ -1,5 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2020 Arm Limited + * Copyright(c) 2010-2019 Intel Corporation + * Copyright(c) 2023 Microsoft Corporation */ #ifndef _RTE_BITOPS_H_ @@ -275,6 +277,488 @@ rte_bit_relaxed_test_and_clear64(unsigned int nr, volatile uint64_t *addr) return val & mask; } +#ifdef RTE_TOOLCHAIN_MSVC + +/** + * Get the count of leading 0-bits in v. + * + * @param v + * The value. + * @return + * The count of leading zero bits. + */ +static inline unsigned int +rte_clz32(uint32_t v) +{ + unsigned long rv; + + (void)_BitScanReverse(&rv, v); + + return (unsigned int)(sizeof(v) * CHAR_BIT - 1 - rv); +} + +/** + * Get the count of leading 0-bits in v. + * + * @param v + * The value. + * @return + * The count of leading zero bits. + */ +static inline unsigned int +rte_clz64(uint64_t v) +{ + unsigned long rv; + + (void)_BitScanReverse64(&rv, v); + + return (unsigned int)(sizeof(v) * CHAR_BIT - 1 - rv); +} + +/** + * Get the count of trailing 0-bits in v. + * + * @param v + * The value. + * @return + * The count of trailing zero bits. + */ +static inline unsigned int +rte_ctz32(uint32_t v) +{ + unsigned long rv; + + (void)_BitScanForward(&rv, v); + + return (unsigned int)rv; +} + +/** + * Get the count of trailing 0-bits in v. + * + * @param v + * The value. + * @return + * The count of trailing zero bits. + */ +static inline unsigned int +rte_ctz64(uint64_t v) +{ + unsigned long rv; + + (void)_BitScanForward64(&rv, v); + + return (unsigned int)rv; +} + +/** + * Get the count of 1-bits in v. + * + * @param v + * The value. + * @return + * The count of 1-bits. + */ +static inline unsigned int +rte_popcount32(uint32_t v) +{ + return (unsigned int)__popcnt(v); +} + +/** + * Get the count of 1-bits in v. + * + * @param v + * The value. + * @return + * The count of 1-bits. + */ +static inline unsigned int +rte_popcount64(uint64_t v) +{ + return (unsigned int)__popcnt64(v); +} + +#else + +/** + * Get the count of leading 0-bits in v. + * + * @param v + * The value. + * @return + * The count of leading zero bits. + */ +static inline unsigned int +rte_clz32(uint32_t v) +{ + return (unsigned int)__builtin_clz(v); +} + +/** + * Get the count of leading 0-bits in v. + * + * @param v + * The value. + * @return + * The count of leading zero bits. + */ +static inline unsigned int +rte_clz64(uint64_t v) +{ + return (unsigned int)__builtin_clzll(v); +} + +/** + * Get the count of trailing 0-bits in v. + * + * @param v + * The value. + * @return + * The count of trailing zero bits. + */ +static inline unsigned int +rte_ctz32(uint32_t v) +{ + return (unsigned int)__builtin_ctz(v); +} + +/** + * Get the count of trailing 0-bits in v. 
+ * + * @param v + * The value. + * @return + * The count of trailing zero bits. + */ +static inline unsigned int +rte_ctz64(uint64_t v) +{ + return (unsigned int)__builtin_ctzll(v); +} + +/** + * Get the count of 1-bits in v. + * + * @param v + * The value. + * @return + * The count of 1-bits. + */ +static inline unsigned int +rte_popcount32(uint32_t v) +{ + return (unsigned int)__builtin_popcount(v); +} + +/** + * Get the count of 1-bits in v. + * + * @param v + * The value. + * @return + * The count of 1-bits. + */ +static inline unsigned int +rte_popcount64(uint64_t v) +{ + return (unsigned int)__builtin_popcountll(v); +} + +#endif + +/** + * Combines 32b inputs most significant set bits into the least + * significant bits to construct a value with the same MSBs as x + * but all 1's under it. + * + * @param x + * The integer whose MSBs need to be combined with its LSBs + * @return + * The combined value. + */ +static inline uint32_t +rte_combine32ms1b(uint32_t x) +{ + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + + return x; +} + +/** + * Combines 64b inputs most significant set bits into the least + * significant bits to construct a value with the same MSBs as x + * but all 1's under it. + * + * @param v + * The integer whose MSBs need to be combined with its LSBs + * @return + * The combined value. + */ +static inline uint64_t +rte_combine64ms1b(uint64_t v) +{ + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + + return v; +} + +/** + * Searches the input parameter for the least significant set bit + * (starting from zero). + * If a least significant 1 bit is found, its bit index is returned. + * If the content of the input parameter is zero, then the content of the return + * value is undefined. + * @param v + * input parameter, should not be zero. + * @return + * least significant set bit in the input parameter. + */ +static inline uint32_t +rte_bsf32(uint32_t v) +{ + return (uint32_t)rte_ctz32(v); +} + +/** + * Searches the input parameter for the least significant set bit + * (starting from zero). Safe version (checks for input parameter being zero). + * + * @warning ``pos`` must be a valid pointer. It is not checked! + * + * @param v + * The input parameter. + * @param pos + * If ``v`` was not 0, this value will contain position of least significant + * bit within the input parameter. + * @return + * Returns 0 if ``v`` was 0, otherwise returns 1. + */ +static inline int +rte_bsf32_safe(uint32_t v, uint32_t *pos) +{ + if (v == 0) + return 0; + + *pos = rte_bsf32(v); + return 1; +} + +/** + * Searches the input parameter for the least significant set bit + * (starting from zero). + * If a least significant 1 bit is found, its bit index is returned. + * If the content of the input parameter is zero, then the content of the return + * value is undefined. + * @param v + * input parameter, should not be zero. + * @return + * least significant set bit in the input parameter. + */ +static inline uint32_t +rte_bsf64(uint64_t v) +{ + return (uint32_t)rte_ctz64(v); +} + +/** + * Searches the input parameter for the least significant set bit + * (starting from zero). Safe version (checks for input parameter being zero). + * + * @warning ``pos`` must be a valid pointer. It is not checked! + * + * @param v + * The input parameter. + * @param pos + * If ``v`` was not 0, this value will contain position of least significant + * bit within the input parameter. + * @return + * Returns 0 if ``v`` was 0, otherwise returns 1. 
+ */ +static inline int +rte_bsf64_safe(uint64_t v, uint32_t *pos) +{ + if (v == 0) + return 0; + + *pos = rte_bsf64(v); + return 1; +} + +/** + * Return the last (most-significant) bit set. + * + * @note The last (most significant) bit is at position 32. + * @note rte_fls_u32(0) = 0, rte_fls_u32(1) = 1, rte_fls_u32(0x80000000) = 32 + * + * @param x + * The input parameter. + * @return + * The last (most-significant) bit set, or 0 if the input is 0. + */ +static inline uint32_t +rte_fls_u32(uint32_t x) +{ + return (x == 0) ? 0 : 32 - rte_clz32(x); +} + +/** + * Return the last (most-significant) bit set. + * + * @note The last (most significant) bit is at position 64. + * @note rte_fls_u64(0) = 0, rte_fls_u64(1) = 1, + * rte_fls_u64(0x8000000000000000) = 64 + * + * @param x + * The input parameter. + * @return + * The last (most-significant) bit set, or 0 if the input is 0. + */ +static inline uint32_t +rte_fls_u64(uint64_t x) +{ + return (x == 0) ? 0 : 64 - rte_clz64(x); +} + +/*********** Macros to work with powers of 2 ********/ + +/** + * Macro to return 1 if n is a power of 2, 0 otherwise + */ +#define RTE_IS_POWER_OF_2(n) ((n) && !(((n) - 1) & (n))) + +/** + * Returns true if n is a power of 2 + * @param n + * Number to check + * @return 1 if true, 0 otherwise + */ +static inline int +rte_is_power_of_2(uint32_t n) +{ + return n && !(n & (n - 1)); +} + +/** + * Aligns input parameter to the next power of 2 + * + * @param x + * The integer value to align + * + * @return + * Input parameter aligned to the next power of 2 + */ +static inline uint32_t +rte_align32pow2(uint32_t x) +{ + x--; + x = rte_combine32ms1b(x); + + return x + 1; +} + +/** + * Aligns input parameter to the previous power of 2 + * + * @param x + * The integer value to align + * + * @return + * Input parameter aligned to the previous power of 2 + */ +static inline uint32_t +rte_align32prevpow2(uint32_t x) +{ + x = rte_combine32ms1b(x); + + return x - (x >> 1); +} + +/** + * Aligns 64b input parameter to the next power of 2 + * + * @param v + * The 64b value to align + * + * @return + * Input parameter aligned to the next power of 2 + */ +static inline uint64_t +rte_align64pow2(uint64_t v) +{ + v--; + v = rte_combine64ms1b(v); + + return v + 1; +} + +/** + * Aligns 64b input parameter to the previous power of 2 + * + * @param v + * The 64b value to align + * + * @return + * Input parameter aligned to the previous power of 2 + */ +static inline uint64_t +rte_align64prevpow2(uint64_t v) +{ + v = rte_combine64ms1b(v); + + return v - (v >> 1); +} + +/** + * Return the rounded-up log2 of a integer. + * + * @note Contrary to the logarithm mathematical operation, + * rte_log2_u32(0) == 0 and not -inf. + * + * @param v + * The input parameter. + * @return + * The rounded-up log2 of the input, or 0 if the input is 0. + */ +static inline uint32_t +rte_log2_u32(uint32_t v) +{ + if (v == 0) + return 0; + v = rte_align32pow2(v); + return rte_bsf32(v); +} + +/** + * Return the rounded-up log2 of a 64-bit integer. + * + * @note Contrary to the logarithm mathematical operation, + * rte_log2_u64(0) == 0 and not -inf. + * + * @param v + * The input parameter. + * @return + * The rounded-up log2 of the input, or 0 if the input is 0. 
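For illustration, a minimal sketch of the bit-scan and power-of-two helpers gathered above, assuming rte_bitops.h is on the include path; the function and variable names below are invented for the example and are not part of the patch:

	#include <stdio.h>
	#include <stdint.h>

	#include <rte_bitops.h>

	/* Invented example: report basic bit facts about a capability mask
	 * and round a requested ring size up to a power of two.
	 */
	static void
	bitops_sketch(uint32_t cap_mask, uint32_t nb_desc)
	{
		uint32_t first;

		printf("capabilities set: %u, highest: bit %u\n",
			rte_popcount32(cap_mask), rte_fls_u32(cap_mask));

		/* rte_bsf32_safe() is the zero-tolerant variant of rte_bsf32(). */
		if (rte_bsf32_safe(cap_mask, &first))
			printf("lowest capability: bit %u\n", first);

		/* Descriptor rings are commonly sized to the next power of two. */
		printf("ring size: %u (log2 %u)\n",
			rte_align32pow2(nb_desc), rte_log2_u32(nb_desc));
	}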
+ */ +static inline uint32_t +rte_log2_u64(uint64_t v) +{ + if (v == 0) + return 0; + v = rte_align64pow2(v); + /* we checked for v being 0 already, so no undefined behavior */ + return rte_bsf64(v); +} + #ifdef __cplusplus } #endif diff --git a/lib/eal/include/rte_branch_prediction.h b/lib/eal/include/rte_branch_prediction.h index 414cd921ba9..c0356ca0804 100644 --- a/lib/eal/include/rte_branch_prediction.h +++ b/lib/eal/include/rte_branch_prediction.h @@ -24,7 +24,11 @@ extern "C" { * do_stuff(); */ #ifndef likely +#ifdef RTE_TOOLCHAIN_MSVC +#define likely(x) (!!(x)) +#else #define likely(x) __builtin_expect(!!(x), 1) +#endif #endif /* likely */ /** @@ -37,7 +41,11 @@ extern "C" { * do_stuff(); */ #ifndef unlikely +#ifdef RTE_TOOLCHAIN_MSVC +#define unlikely(x) (!!(x)) +#else #define unlikely(x) __builtin_expect(!!(x), 0) +#endif #endif /* unlikely */ #ifdef __cplusplus diff --git a/lib/eal/include/rte_common.h b/lib/eal/include/rte_common.h index 77f937e9a5c..484f81e1019 100644 --- a/lib/eal/include/rte_common.h +++ b/lib/eal/include/rte_common.h @@ -24,9 +24,11 @@ extern "C" { /* OS specific include */ #include +#ifndef RTE_TOOLCHAIN_MSVC #ifndef typeof #define typeof __typeof__ #endif +#endif #ifndef __cplusplus #ifndef asm @@ -34,6 +36,10 @@ extern "C" { #endif #endif +#ifdef RTE_TOOLCHAIN_MSVC +#define __extension__ +#endif + /* * RTE_TOOLCHAIN_GCC is defined if the target is built with GCC, * while a host application (like pmdinfogen) may have another compiler. @@ -58,7 +64,11 @@ extern "C" { /** * Force alignment */ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_aligned(a) +#else #define __rte_aligned(a) __attribute__((__aligned__(a))) +#endif #ifdef RTE_ARCH_STRICT_ALIGN typedef uint64_t unaligned_uint64_t __rte_aligned(1); @@ -73,16 +83,29 @@ typedef uint16_t unaligned_uint16_t; /** * Force a structure to be packed */ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_packed +#else #define __rte_packed __attribute__((__packed__)) +#endif /** * Macro to mark a type that is not subject to type-based aliasing rules */ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_may_alias +#else #define __rte_may_alias __attribute__((__may_alias__)) +#endif /******* Macro to mark functions and fields scheduled for removal *****/ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_deprecated +#define __rte_deprecated_msg(msg) +#else #define __rte_deprecated __attribute__((__deprecated__)) #define __rte_deprecated_msg(msg) __attribute__((__deprecated__(msg))) +#endif /** * Macro to mark macros and defines scheduled for removal @@ -103,14 +126,22 @@ typedef uint16_t unaligned_uint16_t; /** * Force symbol to be generated even if it appears to be unused. */ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_used +#else #define __rte_used __attribute__((used)) +#endif /*********** Macros to eliminate unused variable warnings ********/ /** * short definition to mark a function parameter unused */ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_unused +#else #define __rte_unused __attribute__((__unused__)) +#endif /** * Mark pointer as restricted with regard to pointer aliasing. @@ -134,6 +165,9 @@ typedef uint16_t unaligned_uint16_t; * even if the underlying stdio implementation is ANSI-compliant, * so this must be overridden. 
*/ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_format_printf(format_index, first_arg) +#else #if RTE_CC_IS_GNU #define __rte_format_printf(format_index, first_arg) \ __attribute__((format(gnu_printf, format_index, first_arg))) @@ -141,6 +175,7 @@ typedef uint16_t unaligned_uint16_t; #define __rte_format_printf(format_index, first_arg) \ __attribute__((format(printf, format_index, first_arg))) #endif +#endif /** * Tells compiler that the function returns a value that points to @@ -172,8 +207,29 @@ typedef uint16_t unaligned_uint16_t; * Lowest number is the first to run. */ #ifndef RTE_INIT_PRIO /* Allow to override from EAL */ +#ifndef RTE_TOOLCHAIN_MSVC #define RTE_INIT_PRIO(func, prio) \ static void __attribute__((constructor(RTE_PRIO(prio)), used)) func(void) +#else +/* definition from the Microsoft CRT */ +typedef int(__cdecl *_PIFV)(void); + +#define CTOR_SECTION_LOG ".CRT$XIB" +#define CTOR_SECTION_BUS ".CRT$XIC" +#define CTOR_SECTION_CLASS ".CRT$XID" +#define CTOR_SECTION_LAST ".CRT$XIY" + +#define CTOR_PRIORITY_TO_SECTION(priority) CTOR_SECTION_ ## priority + +#define RTE_INIT_PRIO(name, priority) \ + static void name(void); \ + static int __cdecl name ## _thunk(void) { name(); return 0; } \ + __pragma(const_seg(CTOR_PRIORITY_TO_SECTION(priority))) \ + __declspec(allocate(CTOR_PRIORITY_TO_SECTION(priority))) \ + _PIFV name ## _pointer = &name ## _thunk; \ + __pragma(const_seg()) \ + static void name(void) +#endif #endif /** @@ -197,8 +253,24 @@ static void __attribute__((constructor(RTE_PRIO(prio)), used)) func(void) * Lowest number is the last to run. */ #ifndef RTE_FINI_PRIO /* Allow to override from EAL */ +#ifndef RTE_TOOLCHAIN_MSVC #define RTE_FINI_PRIO(func, prio) \ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void) +#else +#define DTOR_SECTION_LOG "mydtor$B" +#define DTOR_SECTION_BUS "mydtor$C" +#define DTOR_SECTION_CLASS "mydtor$D" +#define DTOR_SECTION_LAST "mydtor$Y" + +#define DTOR_PRIORITY_TO_SECTION(priority) DTOR_SECTION_ ## priority + +#define RTE_FINI_PRIO(name, priority) \ + static void name(void); \ + __pragma(const_seg(DTOR_PRIORITY_TO_SECTION(priority))) \ + __declspec(allocate(DTOR_PRIORITY_TO_SECTION(priority))) name ## _pointer = &name; \ + __pragma(const_seg()) \ + static void name(void) +#endif #endif /** @@ -215,7 +287,11 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void) /** * Hint never returning function */ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_noreturn +#else #define __rte_noreturn __attribute__((noreturn)) +#endif /** * Issue a warning in case the function's return value is ignored. @@ -240,12 +316,20 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void) * } * @endcode */ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_warn_unused_result +#else #define __rte_warn_unused_result __attribute__((warn_unused_result)) +#endif /** * Force a function to be inlined */ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_always_inline +#else #define __rte_always_inline inline __attribute__((always_inline)) +#endif /** * Force a function to be noinlined @@ -260,7 +344,11 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void) /** * Hint function in the cold path */ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_cold +#else #define __rte_cold __attribute__((cold)) +#endif /** * Disable AddressSanitizer on some code @@ -430,11 +518,28 @@ rte_is_aligned(const void * const __rte_restrict ptr, const unsigned int align) #define RTE_CACHE_LINE_MIN_SIZE 64 /** Force alignment to cache line. 
*/ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_cache_aligned +#else #define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE) +#endif /** Force minimum cache line alignment. */ #define __rte_cache_min_aligned __rte_aligned(RTE_CACHE_LINE_MIN_SIZE) +#define _RTE_CACHE_GUARD_HELPER2(unique) \ + char cache_guard_ ## unique[RTE_CACHE_LINE_SIZE * RTE_CACHE_GUARD_LINES] \ + __rte_cache_aligned +#define _RTE_CACHE_GUARD_HELPER1(unique) _RTE_CACHE_GUARD_HELPER2(unique) +/** + * Empty cache lines, to guard against false sharing-like effects + * on systems with a next-N-lines hardware prefetcher. + * + * Use as spacing between data accessed by different lcores, + * to prevent cache thrashing on hardware with speculative prefetching. + */ +#define RTE_CACHE_GUARD _RTE_CACHE_GUARD_HELPER1(__COUNTER__) + /*********** PA/IOVA type definitions ********/ /** Physical address */ @@ -453,6 +558,8 @@ typedef uint64_t rte_iova_t; /*********** Structure alignment markers ********/ +#ifndef RTE_TOOLCHAIN_MSVC + /** Generic marker for any place in a structure. */ __extension__ typedef void *RTE_MARKER[0]; /** Marker for 1B alignment in a structure. */ @@ -464,139 +571,7 @@ __extension__ typedef uint32_t RTE_MARKER32[0]; /** Marker for 8B alignment in a structure. */ __extension__ typedef uint64_t RTE_MARKER64[0]; -/** - * Combines 32b inputs most significant set bits into the least - * significant bits to construct a value with the same MSBs as x - * but all 1's under it. - * - * @param x - * The integer whose MSBs need to be combined with its LSBs - * @return - * The combined value. - */ -static inline uint32_t -rte_combine32ms1b(uint32_t x) -{ - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; - - return x; -} - -/** - * Combines 64b inputs most significant set bits into the least - * significant bits to construct a value with the same MSBs as x - * but all 1's under it. - * - * @param v - * The integer whose MSBs need to be combined with its LSBs - * @return - * The combined value. 
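To show the intent of the RTE_CACHE_GUARD macro introduced above, a short sketch of the documented usage with an invented structure; RTE_CACHE_GUARD_LINES is assumed to come from the build configuration:

	#include <stdint.h>

	#include <rte_common.h>

	/* Invented example: per-lcore counters written by different cores.
	 * The guard keeps a next-N-lines hardware prefetcher on one core from
	 * pulling in the cache lines the other core is writing.
	 */
	struct worker_stats {
		uint64_t rx_pkts;
		uint64_t rx_bytes;
		RTE_CACHE_GUARD;
		uint64_t tx_pkts;
		uint64_t tx_bytes;
	} __rte_cache_aligned;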
- */ -static inline uint64_t -rte_combine64ms1b(uint64_t v) -{ - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - v |= v >> 32; - - return v; -} - -/*********** Macros to work with powers of 2 ********/ - -/** - * Macro to return 1 if n is a power of 2, 0 otherwise - */ -#define RTE_IS_POWER_OF_2(n) ((n) && !(((n) - 1) & (n))) - -/** - * Returns true if n is a power of 2 - * @param n - * Number to check - * @return 1 if true, 0 otherwise - */ -static inline int -rte_is_power_of_2(uint32_t n) -{ - return n && !(n & (n - 1)); -} - -/** - * Aligns input parameter to the next power of 2 - * - * @param x - * The integer value to align - * - * @return - * Input parameter aligned to the next power of 2 - */ -static inline uint32_t -rte_align32pow2(uint32_t x) -{ - x--; - x = rte_combine32ms1b(x); - - return x + 1; -} - -/** - * Aligns input parameter to the previous power of 2 - * - * @param x - * The integer value to align - * - * @return - * Input parameter aligned to the previous power of 2 - */ -static inline uint32_t -rte_align32prevpow2(uint32_t x) -{ - x = rte_combine32ms1b(x); - - return x - (x >> 1); -} - -/** - * Aligns 64b input parameter to the next power of 2 - * - * @param v - * The 64b value to align - * - * @return - * Input parameter aligned to the next power of 2 - */ -static inline uint64_t -rte_align64pow2(uint64_t v) -{ - v--; - v = rte_combine64ms1b(v); - - return v + 1; -} - -/** - * Aligns 64b input parameter to the previous power of 2 - * - * @param v - * The 64b value to align - * - * @return - * Input parameter aligned to the previous power of 2 - */ -static inline uint64_t -rte_align64prevpow2(uint64_t v) -{ - v = rte_combine64ms1b(v); - - return v - (v >> 1); -} +#endif /*********** Macros for calculating min and max **********/ @@ -622,165 +597,6 @@ rte_align64prevpow2(uint64_t v) /*********** Other general functions / macros ********/ -/** - * Searches the input parameter for the least significant set bit - * (starting from zero). - * If a least significant 1 bit is found, its bit index is returned. - * If the content of the input parameter is zero, then the content of the return - * value is undefined. - * @param v - * input parameter, should not be zero. - * @return - * least significant set bit in the input parameter. - */ -static inline uint32_t -rte_bsf32(uint32_t v) -{ - return (uint32_t)__builtin_ctz(v); -} - -/** - * Searches the input parameter for the least significant set bit - * (starting from zero). Safe version (checks for input parameter being zero). - * - * @warning ``pos`` must be a valid pointer. It is not checked! - * - * @param v - * The input parameter. - * @param pos - * If ``v`` was not 0, this value will contain position of least significant - * bit within the input parameter. - * @return - * Returns 0 if ``v`` was 0, otherwise returns 1. - */ -static inline int -rte_bsf32_safe(uint32_t v, uint32_t *pos) -{ - if (v == 0) - return 0; - - *pos = rte_bsf32(v); - return 1; -} - -/** - * Return the rounded-up log2 of a integer. - * - * @note Contrary to the logarithm mathematical operation, - * rte_log2_u32(0) == 0 and not -inf. - * - * @param v - * The input parameter. - * @return - * The rounded-up log2 of the input, or 0 if the input is 0. - */ -static inline uint32_t -rte_log2_u32(uint32_t v) -{ - if (v == 0) - return 0; - v = rte_align32pow2(v); - return rte_bsf32(v); -} - - -/** - * Return the last (most-significant) bit set. - * - * @note The last (most significant) bit is at position 32. 
- * @note rte_fls_u32(0) = 0, rte_fls_u32(1) = 1, rte_fls_u32(0x80000000) = 32 - * - * @param x - * The input parameter. - * @return - * The last (most-significant) bit set, or 0 if the input is 0. - */ -static inline uint32_t -rte_fls_u32(uint32_t x) -{ - return (x == 0) ? 0 : 32 - __builtin_clz(x); -} - -/** - * Searches the input parameter for the least significant set bit - * (starting from zero). - * If a least significant 1 bit is found, its bit index is returned. - * If the content of the input parameter is zero, then the content of the return - * value is undefined. - * @param v - * input parameter, should not be zero. - * @return - * least significant set bit in the input parameter. - */ -static inline uint32_t -rte_bsf64(uint64_t v) -{ - return (uint32_t)__builtin_ctzll(v); -} - -/** - * Searches the input parameter for the least significant set bit - * (starting from zero). Safe version (checks for input parameter being zero). - * - * @warning ``pos`` must be a valid pointer. It is not checked! - * - * @param v - * The input parameter. - * @param pos - * If ``v`` was not 0, this value will contain position of least significant - * bit within the input parameter. - * @return - * Returns 0 if ``v`` was 0, otherwise returns 1. - */ -static inline int -rte_bsf64_safe(uint64_t v, uint32_t *pos) -{ - if (v == 0) - return 0; - - *pos = rte_bsf64(v); - return 1; -} - -/** - * Return the last (most-significant) bit set. - * - * @note The last (most significant) bit is at position 64. - * @note rte_fls_u64(0) = 0, rte_fls_u64(1) = 1, - * rte_fls_u64(0x8000000000000000) = 64 - * - * @param x - * The input parameter. - * @return - * The last (most-significant) bit set, or 0 if the input is 0. - */ -static inline uint32_t -rte_fls_u64(uint64_t x) -{ - return (x == 0) ? 0 : 64 - __builtin_clzll(x); -} - -/** - * Return the rounded-up log2 of a 64-bit integer. - * - * @note Contrary to the logarithm mathematical operation, - * rte_log2_u64(0) == 0 and not -inf. - * - * @param v - * The input parameter. - * @return - * The rounded-up log2 of the input, or 0 if the input is 0. - */ -static inline uint32_t -rte_log2_u64(uint64_t v) -{ - if (v == 0) - return 0; - v = rte_align64pow2(v); - /* we checked for v being 0 already, so no undefined behavior */ - return rte_bsf64(v); -} - #ifndef offsetof /** Return the offset of a field in a structure. */ #define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER) @@ -801,6 +617,10 @@ rte_log2_u64(uint64_t v) * struct wrapper *w = container_of(x, struct wrapper, c); */ #ifndef container_of +#ifdef RTE_TOOLCHAIN_MSVC +#define container_of(ptr, type, member) \ + ((type *)((uintptr_t)(ptr) - offsetof(type, member))) +#else #define container_of(ptr, type, member) __extension__ ({ \ const typeof(((type *)0)->member) *_ptr = (ptr); \ __rte_unused type *_target_ptr = \ @@ -808,6 +628,7 @@ rte_log2_u64(uint64_t v) (type *)(((uintptr_t)_ptr) - offsetof(type, member)); \ }) #endif +#endif /** Swap two variables. 
*/ #define RTE_SWAP(a, b) \ diff --git a/lib/eal/include/rte_compat.h b/lib/eal/include/rte_compat.h index fc9fbaaab2b..716bc036164 100644 --- a/lib/eal/include/rte_compat.h +++ b/lib/eal/include/rte_compat.h @@ -12,14 +12,22 @@ extern "C" { #ifndef ALLOW_EXPERIMENTAL_API +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_experimental +#else #define __rte_experimental \ __attribute__((deprecated("Symbol is not yet part of stable ABI"), \ section(".text.experimental"))) +#endif #else +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_experimental +#else #define __rte_experimental \ __attribute__((section(".text.experimental"))) +#endif #endif @@ -30,23 +38,35 @@ __attribute__((section(".text.experimental"))) #if !defined ALLOW_INTERNAL_API && __has_attribute(error) /* For GCC */ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_internal +#else #define __rte_internal \ __attribute__((error("Symbol is not public ABI"), \ section(".text.internal"))) +#endif #elif !defined ALLOW_INTERNAL_API && __has_attribute(diagnose_if) /* For clang */ +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_internal +#else #define __rte_internal \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wgcc-compat\"") \ __attribute__((diagnose_if(1, "Symbol is not public ABI", "error"), \ section(".text.internal"))) \ _Pragma("GCC diagnostic pop") +#endif #else +#ifdef RTE_TOOLCHAIN_MSVC +#define __rte_internal +#else #define __rte_internal \ __attribute__((section(".text.internal"))) +#endif #endif diff --git a/lib/eal/include/rte_debug.h b/lib/eal/include/rte_debug.h index 2c4b94a7c9b..74593cd4d4a 100644 --- a/lib/eal/include/rte_debug.h +++ b/lib/eal/include/rte_debug.h @@ -60,11 +60,7 @@ void rte_dump_stack(void); * documentation. */ void __rte_panic(const char *funcname , const char *format, ...) -#ifdef __GNUC__ -#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2)) __rte_cold -#endif -#endif __rte_noreturn __rte_format_printf(2, 3); diff --git a/lib/eal/include/rte_eal.h b/lib/eal/include/rte_eal.h index 53c4a5519e6..cd318ee1415 100644 --- a/lib/eal/include/rte_eal.h +++ b/lib/eal/include/rte_eal.h @@ -27,9 +27,6 @@ extern "C" { #define RTE_MAGIC 19820526 /**< Magic number written by the main partition when ready. */ -/* Maximum thread_name length. */ -#define RTE_MAX_THREAD_NAME_LEN 16 - /** * The type of process in a linux, multi-process setup */ diff --git a/lib/eal/include/rte_eal_memconfig.h b/lib/eal/include/rte_eal_memconfig.h index c527f9aa294..0b1d0d4ff0d 100644 --- a/lib/eal/include/rte_eal_memconfig.h +++ b/lib/eal/include/rte_eal_memconfig.h @@ -39,6 +39,10 @@ __rte_internal rte_spinlock_t * rte_mcfg_timer_get_lock(void); +__rte_internal +rte_spinlock_t * +rte_mcfg_ethdev_get_lock(void); + /** * Lock the internal EAL shared memory configuration for shared access. */ diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h index 6ce810b876d..7deae47af32 100644 --- a/lib/eal/include/rte_lcore.h +++ b/lib/eal/include/rte_lcore.h @@ -385,20 +385,6 @@ void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb); void rte_lcore_dump(FILE *f); -/** - * Set thread names. - * - * @note It fails with glibc < 2.12. - * - * @param id - * Thread id. - * @param name - * Thread name to set. - * @return - * On success, return 0; otherwise return a negative value. - */ -int rte_thread_setname(pthread_t id, const char *name); - /** * Register current non-EAL thread as a lcore. * @@ -421,34 +407,6 @@ rte_thread_register(void); void rte_thread_unregister(void); -/** - * Create a control thread. 
- * - * Creates a control thread with the given name and attributes. The - * affinity of the new thread is based on the CPU affinity retrieved - * at the time rte_eal_init() was called, the dataplane and service - * lcores are then excluded. If setting the name of the thread fails, - * the error is ignored and a debug message is logged. - * - * @param thread - * Filled with the thread id of the new created thread. - * @param name - * The name of the control thread (max 16 characters including '\0'). - * @param attr - * Attributes for the new thread. - * @param start_routine - * Function to be executed by the new thread. - * @param arg - * Argument passed to start_routine. - * @return - * On success, returns 0; on error, it returns a negative value - * corresponding to the error number. - */ -int -rte_ctrl_thread_create(pthread_t *thread, const char *name, - const pthread_attr_t *attr, - void *(*start_routine)(void *), void *arg); - #ifdef __cplusplus } #endif diff --git a/lib/eal/include/rte_lock_annotations.h b/lib/eal/include/rte_lock_annotations.h index 9fc50082d63..2456a69352b 100644 --- a/lib/eal/include/rte_lock_annotations.h +++ b/lib/eal/include/rte_lock_annotations.h @@ -40,6 +40,9 @@ extern "C" { #define __rte_unlock_function(...) \ __attribute__((unlock_function(__VA_ARGS__))) +#define __rte_locks_excluded(...) \ + __attribute__((locks_excluded(__VA_ARGS__))) + #define __rte_no_thread_safety_analysis \ __attribute__((no_thread_safety_analysis)) @@ -62,6 +65,8 @@ extern "C" { #define __rte_unlock_function(...) +#define __rte_locks_excluded(...) + #define __rte_no_thread_safety_analysis #endif /* RTE_ANNOTATE_LOCKS */ diff --git a/lib/eal/include/rte_mcslock.h b/lib/eal/include/rte_mcslock.h index a805cb2906d..2ca967f9c17 100644 --- a/lib/eal/include/rte_mcslock.h +++ b/lib/eal/include/rte_mcslock.h @@ -27,13 +27,14 @@ extern "C" { #include #include #include +#include /** * The rte_mcslock_t type. */ typedef struct rte_mcslock { - struct rte_mcslock *next; - int locked; /* 1 if the queue locked, 0 otherwise */ + RTE_ATOMIC(struct rte_mcslock *) next; + RTE_ATOMIC(int) locked; /* 1 if the queue locked, 0 otherwise */ } rte_mcslock_t; /** @@ -48,13 +49,13 @@ typedef struct rte_mcslock { * lock should use its 'own node'. */ static inline void -rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me) +rte_mcslock_lock(RTE_ATOMIC(rte_mcslock_t *) *msl, rte_mcslock_t *me) { rte_mcslock_t *prev; /* Init me node */ - __atomic_store_n(&me->locked, 1, __ATOMIC_RELAXED); - __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED); + rte_atomic_store_explicit(&me->locked, 1, rte_memory_order_relaxed); + rte_atomic_store_explicit(&me->next, NULL, rte_memory_order_relaxed); /* If the queue is empty, the exchange operation is enough to acquire * the lock. Hence, the exchange operation requires acquire semantics. @@ -62,7 +63,7 @@ rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me) * visible to other CPUs/threads. Hence, the exchange operation requires * release semantics as well. */ - prev = __atomic_exchange_n(msl, me, __ATOMIC_ACQ_REL); + prev = rte_atomic_exchange_explicit(msl, me, rte_memory_order_acq_rel); if (likely(prev == NULL)) { /* Queue was empty, no further action required, * proceed with lock taken. @@ -76,19 +77,19 @@ rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me) * strong as a release fence and is not sufficient to enforce the * desired order here. 
*/ - __atomic_store_n(&prev->next, me, __ATOMIC_RELEASE); + rte_atomic_store_explicit(&prev->next, me, rte_memory_order_release); /* The while-load of me->locked should not move above the previous * store to prev->next. Otherwise it will cause a deadlock. Need a * store-load barrier. */ - __atomic_thread_fence(__ATOMIC_ACQ_REL); + __rte_atomic_thread_fence(rte_memory_order_acq_rel); /* If the lock has already been acquired, it first atomically * places the node at the end of the queue and then proceeds * to spin on me->locked until the previous lock holder resets * the me->locked using mcslock_unlock(). */ - rte_wait_until_equal_32((uint32_t *)&me->locked, 0, __ATOMIC_ACQUIRE); + rte_wait_until_equal_32((uint32_t *)(uintptr_t)&me->locked, 0, rte_memory_order_acquire); } /** @@ -100,34 +101,33 @@ rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me) * A pointer to the node of MCS lock passed in rte_mcslock_lock. */ static inline void -rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me) +rte_mcslock_unlock(RTE_ATOMIC(rte_mcslock_t *) *msl, RTE_ATOMIC(rte_mcslock_t *) me) { /* Check if there are more nodes in the queue. */ - if (likely(__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)) { + if (likely(rte_atomic_load_explicit(&me->next, rte_memory_order_relaxed) == NULL)) { /* No, last member in the queue. */ - rte_mcslock_t *save_me = __atomic_load_n(&me, __ATOMIC_RELAXED); + rte_mcslock_t *save_me = rte_atomic_load_explicit(&me, rte_memory_order_relaxed); /* Release the lock by setting it to NULL */ - if (likely(__atomic_compare_exchange_n(msl, &save_me, NULL, 0, - __ATOMIC_RELEASE, __ATOMIC_RELAXED))) + if (likely(rte_atomic_compare_exchange_strong_explicit(msl, &save_me, NULL, + rte_memory_order_release, rte_memory_order_relaxed))) return; /* Speculative execution would be allowed to read in the * while-loop first. This has the potential to cause a * deadlock. Need a load barrier. */ - __atomic_thread_fence(__ATOMIC_ACQUIRE); + __rte_atomic_thread_fence(rte_memory_order_acquire); /* More nodes added to the queue by other CPUs. * Wait until the next pointer is set. */ - uintptr_t *next; - next = (uintptr_t *)&me->next; - RTE_WAIT_UNTIL_MASKED(next, UINTPTR_MAX, !=, 0, - __ATOMIC_RELAXED); + RTE_ATOMIC(uintptr_t) *next; + next = (__rte_atomic uintptr_t *)&me->next; + RTE_WAIT_UNTIL_MASKED(next, UINTPTR_MAX, !=, 0, rte_memory_order_relaxed); } /* Pass lock to next waiter. */ - __atomic_store_n(&me->next->locked, 0, __ATOMIC_RELEASE); + rte_atomic_store_explicit(&me->next->locked, 0, rte_memory_order_release); } /** @@ -141,10 +141,10 @@ rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me) * 1 if the lock is successfully taken; 0 otherwise. */ static inline int -rte_mcslock_trylock(rte_mcslock_t **msl, rte_mcslock_t *me) +rte_mcslock_trylock(RTE_ATOMIC(rte_mcslock_t *) *msl, rte_mcslock_t *me) { /* Init me node */ - __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED); + rte_atomic_store_explicit(&me->next, NULL, rte_memory_order_relaxed); /* Try to lock */ rte_mcslock_t *expected = NULL; @@ -155,8 +155,8 @@ rte_mcslock_trylock(rte_mcslock_t **msl, rte_mcslock_t *me) * is visible to other CPUs/threads. Hence, the compare-exchange * operation requires release semantics as well. 
*/ - return __atomic_compare_exchange_n(msl, &expected, me, 0, - __ATOMIC_ACQ_REL, __ATOMIC_RELAXED); + return rte_atomic_compare_exchange_strong_explicit(msl, &expected, me, + rte_memory_order_acq_rel, rte_memory_order_relaxed); } /** @@ -168,9 +168,9 @@ rte_mcslock_trylock(rte_mcslock_t **msl, rte_mcslock_t *me) * 1 if the lock is currently taken; 0 otherwise. */ static inline int -rte_mcslock_is_locked(rte_mcslock_t *msl) +rte_mcslock_is_locked(RTE_ATOMIC(rte_mcslock_t *) msl) { - return (__atomic_load_n(&msl, __ATOMIC_RELAXED) != NULL); + return (rte_atomic_load_explicit(&msl, rte_memory_order_relaxed) != NULL); } #ifdef __cplusplus diff --git a/lib/eal/include/rte_memory.h b/lib/eal/include/rte_memory.h index 3a1c607228b..842362d5272 100644 --- a/lib/eal/include/rte_memory.h +++ b/lib/eal/include/rte_memory.h @@ -22,6 +22,7 @@ extern "C" { #include #include #include +#include #include #define RTE_PGSIZE_4K (1ULL << 12) @@ -250,7 +251,8 @@ rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg); * -1 if user function reported error */ int -rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg); +rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg) + __rte_locks_excluded(rte_mcfg_mem_get_lock()); /** * Walk list of all memsegs without performing any locking. diff --git a/lib/eal/include/rte_per_lcore.h b/lib/eal/include/rte_per_lcore.h index eaedf0cb37c..41fe1f0ec01 100644 --- a/lib/eal/include/rte_per_lcore.h +++ b/lib/eal/include/rte_per_lcore.h @@ -22,8 +22,13 @@ extern "C" { #endif -#include +#ifdef RTE_TOOLCHAIN_MSVC +#define RTE_DEFINE_PER_LCORE(type, name) \ + __declspec(thread) typeof(type) per_lcore_##name +#define RTE_DECLARE_PER_LCORE(type, name) \ + extern __declspec(thread) typeof(type) per_lcore_##name +#else /** * Macro to define a per lcore variable "var" of type "type", don't * use keywords like "static" or "volatile" in type, just prefix the @@ -37,6 +42,7 @@ extern "C" { */ #define RTE_DECLARE_PER_LCORE(type, name) \ extern __thread __typeof__(type) per_lcore_##name +#endif /** * Read/write the per-lcore variable value diff --git a/lib/eal/include/rte_pflock.h b/lib/eal/include/rte_pflock.h index a3f7291fa1f..553504bb6dd 100644 --- a/lib/eal/include/rte_pflock.h +++ b/lib/eal/include/rte_pflock.h @@ -34,14 +34,15 @@ extern "C" { #include #include #include +#include /** * The rte_pflock_t type. */ struct rte_pflock { struct { - uint16_t in; - uint16_t out; + RTE_ATOMIC(uint16_t) in; + RTE_ATOMIC(uint16_t) out; } rd, wr; }; typedef struct rte_pflock rte_pflock_t; @@ -116,14 +117,13 @@ rte_pflock_read_lock(rte_pflock_t *pf) * If no writer is present, then the operation has completed * successfully. */ - w = __atomic_fetch_add(&pf->rd.in, RTE_PFLOCK_RINC, __ATOMIC_ACQUIRE) + w = rte_atomic_fetch_add_explicit(&pf->rd.in, RTE_PFLOCK_RINC, rte_memory_order_acquire) & RTE_PFLOCK_WBITS; if (w == 0) return; /* Wait for current write phase to complete. */ - RTE_WAIT_UNTIL_MASKED(&pf->rd.in, RTE_PFLOCK_WBITS, !=, w, - __ATOMIC_ACQUIRE); + RTE_WAIT_UNTIL_MASKED(&pf->rd.in, RTE_PFLOCK_WBITS, !=, w, rte_memory_order_acquire); } /** @@ -139,7 +139,7 @@ __rte_experimental static inline void rte_pflock_read_unlock(rte_pflock_t *pf) { - __atomic_fetch_add(&pf->rd.out, RTE_PFLOCK_RINC, __ATOMIC_RELEASE); + rte_atomic_fetch_add_explicit(&pf->rd.out, RTE_PFLOCK_RINC, rte_memory_order_release); } /** @@ -160,8 +160,9 @@ rte_pflock_write_lock(rte_pflock_t *pf) /* Acquire ownership of write-phase. * This is same as rte_ticketlock_lock(). 
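Stepping back to the MCS queue lock converted a little above: callers keep passing their own queue node, only the atomic qualifications change. A sketch under that assumption, with invented names:

	#include <rte_mcslock.h>

	/* Invented example: one lock word shared by all threads (NULL means
	 * unlocked), one queue node per caller, typically on the stack.
	 */
	static RTE_ATOMIC(rte_mcslock_t *) worker_lock;

	static void
	do_protected_work(void)
	{
		rte_mcslock_t me;

		rte_mcslock_lock(&worker_lock, &me);
		/* ... critical section ... */
		rte_mcslock_unlock(&worker_lock, &me);
	}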
*/ - ticket = __atomic_fetch_add(&pf->wr.in, 1, __ATOMIC_RELAXED); - rte_wait_until_equal_16(&pf->wr.out, ticket, __ATOMIC_ACQUIRE); + ticket = rte_atomic_fetch_add_explicit(&pf->wr.in, 1, rte_memory_order_relaxed); + rte_wait_until_equal_16((uint16_t *)(uintptr_t)&pf->wr.out, ticket, + rte_memory_order_acquire); /* * Acquire ticket on read-side in order to allow them @@ -172,10 +173,11 @@ rte_pflock_write_lock(rte_pflock_t *pf) * speculatively. */ w = RTE_PFLOCK_PRES | (ticket & RTE_PFLOCK_PHID); - ticket = __atomic_fetch_add(&pf->rd.in, w, __ATOMIC_RELAXED); + ticket = rte_atomic_fetch_add_explicit(&pf->rd.in, w, rte_memory_order_relaxed); /* Wait for any pending readers to flush. */ - rte_wait_until_equal_16(&pf->rd.out, ticket, __ATOMIC_ACQUIRE); + rte_wait_until_equal_16((uint16_t *)(uintptr_t)&pf->rd.out, ticket, + rte_memory_order_acquire); } /** @@ -192,10 +194,10 @@ static inline void rte_pflock_write_unlock(rte_pflock_t *pf) { /* Migrate from write phase to read phase. */ - __atomic_fetch_and(&pf->rd.in, RTE_PFLOCK_LSB, __ATOMIC_RELEASE); + rte_atomic_fetch_and_explicit(&pf->rd.in, RTE_PFLOCK_LSB, rte_memory_order_release); /* Allow other writers to continue. */ - __atomic_fetch_add(&pf->wr.out, 1, __ATOMIC_RELEASE); + rte_atomic_fetch_add_explicit(&pf->wr.out, 1, rte_memory_order_release); } #ifdef __cplusplus diff --git a/lib/eal/include/rte_random.h b/lib/eal/include/rte_random.h index 2edf5d210b4..c12eb07ea81 100644 --- a/lib/eal/include/rte_random.h +++ b/lib/eal/include/rte_random.h @@ -27,8 +27,8 @@ extern "C" { * value. * * This function is not multi-thread safe in regards to other - * rte_srand() calls, nor is it in relation to concurrent rte_rand() - * calls. + * rte_srand() calls, nor is it in relation to concurrent rte_rand(), + * rte_rand_max() or rte_drand() calls. * * @param seedval * The value of the seed. @@ -41,8 +41,9 @@ rte_srand(uint64_t seedval); * * The generator is not cryptographically secure. * - * If called from EAL threads or registered non-EAL threads, this function - * is thread-safe. + * rte_rand(), rte_rand_max() and rte_drand() are multi-thread safe, + * with the exception that they may not be called by multiple + * _unregistered_ non-EAL threads in parallel. * * @return * A pseudo-random value between 0 and (1<<64)-1. @@ -56,8 +57,9 @@ rte_rand(void); * This function returns an uniformly distributed (unbiased) random * number less than a user-specified maximum value. * - * If called from EAL threads or registered non-EAL threads, this function - * is thread-safe. + * rte_rand(), rte_rand_max() and rte_drand() are multi-thread safe, + * with the exception that they may not be called by multiple + * _unregistered_ non-EAL threads in parallel. * * @param upper_bound * The upper bound of the generated number. @@ -78,8 +80,9 @@ rte_rand_max(uint64_t upper_bound); * * The generator is not cryptographically secure. * - * If called from EAL threads or registered non-EAL threads, this function - * is thread-safe. + * rte_rand(), rte_rand_max() and rte_drand() are multi-thread safe, + * with the exception that they may not be called by multiple + * _unregistered_ non-EAL threads in parallel. * * @return * A pseudo-random value between 0 and 1.0. diff --git a/lib/eal/include/rte_seqcount.h b/lib/eal/include/rte_seqcount.h index ff62708e1b7..4f9cefb3386 100644 --- a/lib/eal/include/rte_seqcount.h +++ b/lib/eal/include/rte_seqcount.h @@ -26,12 +26,13 @@ extern "C" { #include #include #include +#include /** * The RTE seqcount type. 
*/ typedef struct { - uint32_t sn; /**< A sequence number for the protected data. */ + RTE_ATOMIC(uint32_t) sn; /**< A sequence number for the protected data. */ } rte_seqcount_t; /** @@ -105,11 +106,11 @@ __rte_experimental static inline uint32_t rte_seqcount_read_begin(const rte_seqcount_t *seqcount) { - /* __ATOMIC_ACQUIRE to prevent loads after (in program order) + /* rte_memory_order_acquire to prevent loads after (in program order) * from happening before the sn load. Synchronizes-with the * store release in rte_seqcount_write_end(). */ - return __atomic_load_n(&seqcount->sn, __ATOMIC_ACQUIRE); + return rte_atomic_load_explicit(&seqcount->sn, rte_memory_order_acquire); } /** @@ -160,9 +161,9 @@ rte_seqcount_read_retry(const rte_seqcount_t *seqcount, uint32_t begin_sn) return true; /* make sure the data loads happens before the sn load */ - rte_atomic_thread_fence(__ATOMIC_ACQUIRE); + rte_atomic_thread_fence(rte_memory_order_acquire); - end_sn = __atomic_load_n(&seqcount->sn, __ATOMIC_RELAXED); + end_sn = rte_atomic_load_explicit(&seqcount->sn, rte_memory_order_relaxed); /* A writer incremented the sequence number during this read * critical section. @@ -204,12 +205,12 @@ rte_seqcount_write_begin(rte_seqcount_t *seqcount) sn = seqcount->sn + 1; - __atomic_store_n(&seqcount->sn, sn, __ATOMIC_RELAXED); + rte_atomic_store_explicit(&seqcount->sn, sn, rte_memory_order_relaxed); - /* __ATOMIC_RELEASE to prevent stores after (in program order) + /* rte_memory_order_release to prevent stores after (in program order) * from happening before the sn store. */ - rte_atomic_thread_fence(__ATOMIC_RELEASE); + rte_atomic_thread_fence(rte_memory_order_release); } /** @@ -236,7 +237,7 @@ rte_seqcount_write_end(rte_seqcount_t *seqcount) sn = seqcount->sn + 1; /* Synchronizes-with the load acquire in rte_seqcount_read_begin(). */ - __atomic_store_n(&seqcount->sn, sn, __ATOMIC_RELEASE); + rte_atomic_store_explicit(&seqcount->sn, sn, rte_memory_order_release); } #ifdef __cplusplus diff --git a/lib/eal/include/rte_stdatomic.h b/lib/eal/include/rte_stdatomic.h new file mode 100644 index 00000000000..8579e2d4e51 --- /dev/null +++ b/lib/eal/include/rte_stdatomic.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Microsoft Corporation + */ + +#ifndef RTE_STDATOMIC_H +#define RTE_STDATOMIC_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef RTE_ENABLE_STDATOMIC +#ifdef __STDC_NO_ATOMICS__ +#error enable_stdatomic=true but atomics not supported by toolchain +#endif + +#include + +/* RTE_ATOMIC(type) is provided for use as a type specifier + * permitting designation of an rte atomic type. + */ +#define RTE_ATOMIC(type) _Atomic(type) + +/* __rte_atomic is provided for type qualification permitting + * designation of an rte atomic qualified type-name. + */ +#define __rte_atomic _Atomic + +/* The memory order is an enumerated type in C11. 
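The rte_seqcount_t conversion above keeps the usual reader pattern; a sketch of such a reader loop, assuming an invented shared structure protected by the sequence counter:

	#include <stdint.h>

	#include <rte_seqcount.h>

	struct shared_cfg {
		rte_seqcount_t sc;
		uint64_t lo;
		uint64_t hi;
	};

	/* Invented example: retry the reads until no writer raced with them. */
	static void
	read_cfg(const struct shared_cfg *cfg, uint64_t *lo, uint64_t *hi)
	{
		uint32_t sn;

		do {
			sn = rte_seqcount_read_begin(&cfg->sc);
			*lo = cfg->lo;
			*hi = cfg->hi;
		} while (rte_seqcount_read_retry(&cfg->sc, sn));
	}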
*/ +typedef memory_order rte_memory_order; + +#define rte_memory_order_relaxed memory_order_relaxed +#ifdef __ATOMIC_RELAXED +static_assert(rte_memory_order_relaxed == __ATOMIC_RELAXED, + "rte_memory_order_relaxed == __ATOMIC_RELAXED"); +#endif + +#define rte_memory_order_consume memory_order_consume +#ifdef __ATOMIC_CONSUME +static_assert(rte_memory_order_consume == __ATOMIC_CONSUME, + "rte_memory_order_consume == __ATOMIC_CONSUME"); +#endif + +#define rte_memory_order_acquire memory_order_acquire +#ifdef __ATOMIC_ACQUIRE +static_assert(rte_memory_order_acquire == __ATOMIC_ACQUIRE, + "rte_memory_order_acquire == __ATOMIC_ACQUIRE"); +#endif + +#define rte_memory_order_release memory_order_release +#ifdef __ATOMIC_RELEASE +static_assert(rte_memory_order_release == __ATOMIC_RELEASE, + "rte_memory_order_release == __ATOMIC_RELEASE"); +#endif + +#define rte_memory_order_acq_rel memory_order_acq_rel +#ifdef __ATOMIC_ACQ_REL +static_assert(rte_memory_order_acq_rel == __ATOMIC_ACQ_REL, + "rte_memory_order_acq_rel == __ATOMIC_ACQ_REL"); +#endif + +#define rte_memory_order_seq_cst memory_order_seq_cst +#ifdef __ATOMIC_SEQ_CST +static_assert(rte_memory_order_seq_cst == __ATOMIC_SEQ_CST, + "rte_memory_order_seq_cst == __ATOMIC_SEQ_CST"); +#endif + +#define rte_atomic_load_explicit(ptr, memorder) \ + atomic_load_explicit(ptr, memorder) + +#define rte_atomic_store_explicit(ptr, val, memorder) \ + atomic_store_explicit(ptr, val, memorder) + +#define rte_atomic_exchange_explicit(ptr, val, memorder) \ + atomic_exchange_explicit(ptr, val, memorder) + +#define rte_atomic_compare_exchange_strong_explicit(ptr, expected, desired, \ + succ_memorder, fail_memorder) \ + atomic_compare_exchange_strong_explicit(ptr, expected, desired, \ + succ_memorder, fail_memorder) + +#define rte_atomic_compare_exchange_weak_explicit(ptr, expected, desired, \ + succ_memorder, fail_memorder) \ + atomic_compare_exchange_weak_explicit(ptr, expected, desired, \ + succ_memorder, fail_memorder) + +#define rte_atomic_fetch_add_explicit(ptr, val, memorder) \ + atomic_fetch_add_explicit(ptr, val, memorder) + +#define rte_atomic_fetch_sub_explicit(ptr, val, memorder) \ + atomic_fetch_sub_explicit(ptr, val, memorder) + +#define rte_atomic_fetch_and_explicit(ptr, val, memorder) \ + atomic_fetch_and_explicit(ptr, val, memorder) + +#define rte_atomic_fetch_xor_explicit(ptr, val, memorder) \ + atomic_fetch_xor_explicit(ptr, val, memorder) + +#define rte_atomic_fetch_or_explicit(ptr, val, memorder) \ + atomic_fetch_or_explicit(ptr, val, memorder) + +#define rte_atomic_fetch_nand_explicit(ptr, val, memorder) \ + atomic_fetch_nand_explicit(ptr, val, memorder) + +#define rte_atomic_flag_test_and_set_explicit(ptr, memorder) \ + atomic_flag_test_and_set_explicit(ptr, memorder) + +#define rte_atomic_flag_clear_explicit(ptr, memorder) \ + atomic_flag_clear_explicit(ptr, memorder) + +/* We provide internal macro here to allow conditional expansion + * in the body of the per-arch rte_atomic_thread_fence inline functions. + */ +#define __rte_atomic_thread_fence(memorder) \ + atomic_thread_fence(memorder) + +#else /* !RTE_ENABLE_STDATOMIC */ + +#define RTE_ATOMIC(type) type + +#define __rte_atomic + +/* The memory order is an integer type in GCC built-ins, + * not an enumerated type like in C11. 
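Either branch of this header (C11 stdatomic here, or the GCC built-in fallback that follows) presents the same macro surface to callers. A minimal sketch of that surface in use; the counter name is invented:

	#include <stdint.h>

	#include <rte_stdatomic.h>

	static RTE_ATOMIC(uint64_t) dropped_pkts;

	static inline void
	count_drop(void)
	{
		rte_atomic_fetch_add_explicit(&dropped_pkts, 1,
				rte_memory_order_relaxed);
	}

	static inline uint64_t
	drops_snapshot(void)
	{
		return rte_atomic_load_explicit(&dropped_pkts,
				rte_memory_order_relaxed);
	}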
+ */ +typedef int rte_memory_order; + +#define rte_memory_order_relaxed __ATOMIC_RELAXED +#define rte_memory_order_consume __ATOMIC_CONSUME +#define rte_memory_order_acquire __ATOMIC_ACQUIRE +#define rte_memory_order_release __ATOMIC_RELEASE +#define rte_memory_order_acq_rel __ATOMIC_ACQ_REL +#define rte_memory_order_seq_cst __ATOMIC_SEQ_CST + +#define rte_atomic_load_explicit(ptr, memorder) \ + __atomic_load_n(ptr, memorder) + +#define rte_atomic_store_explicit(ptr, val, memorder) \ + __atomic_store_n(ptr, val, memorder) + +#define rte_atomic_exchange_explicit(ptr, val, memorder) \ + __atomic_exchange_n(ptr, val, memorder) + +#define rte_atomic_compare_exchange_strong_explicit(ptr, expected, desired, \ + succ_memorder, fail_memorder) \ + __atomic_compare_exchange_n(ptr, expected, desired, 0, \ + succ_memorder, fail_memorder) + +#define rte_atomic_compare_exchange_weak_explicit(ptr, expected, desired, \ + succ_memorder, fail_memorder) \ + __atomic_compare_exchange_n(ptr, expected, desired, 1, \ + succ_memorder, fail_memorder) + +#define rte_atomic_fetch_add_explicit(ptr, val, memorder) \ + __atomic_fetch_add(ptr, val, memorder) + +#define rte_atomic_fetch_sub_explicit(ptr, val, memorder) \ + __atomic_fetch_sub(ptr, val, memorder) + +#define rte_atomic_fetch_and_explicit(ptr, val, memorder) \ + __atomic_fetch_and(ptr, val, memorder) + +#define rte_atomic_fetch_xor_explicit(ptr, val, memorder) \ + __atomic_fetch_xor(ptr, val, memorder) + +#define rte_atomic_fetch_or_explicit(ptr, val, memorder) \ + __atomic_fetch_or(ptr, val, memorder) + +#define rte_atomic_fetch_nand_explicit(ptr, val, memorder) \ + __atomic_fetch_nand(ptr, val, memorder) + +#define rte_atomic_flag_test_and_set_explicit(ptr, memorder) \ + __atomic_test_and_set(ptr, memorder) + +#define rte_atomic_flag_clear_explicit(ptr, memorder) \ + __atomic_clear(ptr, memorder) + +/* We provide internal macro here to allow conditional expansion + * in the body of the per-arch rte_atomic_thread_fence inline functions. + */ +#define __rte_atomic_thread_fence(memorder) \ + __atomic_thread_fence(memorder) + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_STDATOMIC_H */ diff --git a/lib/eal/include/rte_thread.h b/lib/eal/include/rte_thread.h index 369e2375f6a..8da9d4d3fbc 100644 --- a/lib/eal/include/rte_thread.h +++ b/lib/eal/include/rte_thread.h @@ -23,6 +23,16 @@ extern "C" { #endif +/** Maximum thread name length (including '\0'). */ +#define RTE_THREAD_NAME_SIZE 16 +/* Old definition, aliased for compatibility. */ +#define RTE_MAX_THREAD_NAME_LEN RTE_THREAD_NAME_SIZE + +/** Thread name prefix automatically added to all internal threads. */ +#define RTE_THREAD_INTERNAL_PREFIX "dpdk-" +/** Maximum internal thread name length (including '\0'). */ +#define RTE_THREAD_INTERNAL_NAME_SIZE 11 + /** * Thread id descriptor. */ @@ -68,9 +78,6 @@ typedef struct { typedef struct eal_tls_key *rte_thread_key; /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Create a new thread that will invoke the 'thread_func' routine. * * @param thread_id @@ -89,15 +96,11 @@ typedef struct eal_tls_key *rte_thread_key; * On success, return 0. * On failure, return a positive errno-style error number. */ -__rte_experimental int rte_thread_create(rte_thread_t *thread_id, const rte_thread_attr_t *thread_attr, rte_thread_func thread_func, void *arg); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Create a control thread. * * Creates a control thread with the given name and attributes. 
The @@ -110,27 +113,49 @@ int rte_thread_create(rte_thread_t *thread_id, * Filled with the thread id of the new created thread. * @param name * The name of the control thread - * (max RTE_MAX_THREAD_NAME_LEN characters including '\0'). - * @param thread_attr - * Attributes for the new thread. + * (max RTE_THREAD_NAME_SIZE characters including '\0'). * @param thread_func * Function to be executed by the new thread. * @param arg - * Argument passed to start_routine. + * Argument passed to thread_func. * @return * On success, returns 0; on error, it returns a negative value * corresponding to the error number. */ -__rte_experimental int rte_thread_create_control(rte_thread_t *thread, const char *name, - const rte_thread_attr_t *thread_attr, rte_thread_func thread_func, - void *arg); + rte_thread_func thread_func, void *arg); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. + * Create an internal control thread. + * + * Creates a control thread with the given name prefixed. + * If setting the name of the thread fails, the error is ignored and logged. * + * The affinity of the new thread is based on the CPU affinity retrieved + * at the time rte_eal_init() was called, the EAL threads are then excluded. + * + * @param id + * Filled with the thread ID of the new created thread. + * @param name + * The name of the control thread. + * See RTE_THREAD_INTERNAL_NAME_SIZE for maximum length. + * The name of the driver or library should be first, + * then followed by a hyphen and more details. + * It will be prefixed with RTE_THREAD_INTERNAL_PREFIX by this function. + * @param func + * Function to be executed by the new thread. + * @param arg + * Argument passed to func. + * @return + * On success, returns 0; a negative value otherwise. + */ +__rte_internal +int +rte_thread_create_internal_control(rte_thread_t *id, const char *name, + rte_thread_func func, void *arg); + +/** * Waits for the thread identified by 'thread_id' to terminate * * @param thread_id @@ -143,13 +168,9 @@ rte_thread_create_control(rte_thread_t *thread, const char *name, * On success, return 0. * On failure, return a positive errno-style error number. */ -__rte_experimental int rte_thread_join(rte_thread_t thread_id, uint32_t *value_ptr); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Indicate that the return value of the thread is not needed and * all thread resources should be release when the thread terminates. * @@ -160,26 +181,19 @@ int rte_thread_join(rte_thread_t thread_id, uint32_t *value_ptr); * On success, return 0. * On failure, return a positive errno-style error number. */ -__rte_experimental int rte_thread_detach(rte_thread_t thread_id); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Get the id of the calling thread. * * @return * Return the thread id of the calling thread. */ -__rte_experimental rte_thread_t rte_thread_self(void); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Set the name of the thread. + * * This API is a noop if the underlying platform does not * support setting the thread name or the platform-specific * API used to set the thread name fails. @@ -188,17 +202,33 @@ rte_thread_t rte_thread_self(void); * The id of the thread to set name. * * @param thread_name - * The name to set. Truncated to RTE_MAX_THREAD_NAME_LEN, + * The name to set. Truncated to RTE_THREAD_NAME_SIZE, * including terminating NUL if necessary. 
*/ -__rte_experimental void rte_thread_set_name(rte_thread_t thread_id, const char *thread_name); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. + * Set the name of an internal thread with the common prefix. + * + * This API is a noop if the underlying platform does not support + * setting the thread name, or if it fails. * + * @param id + * The ID of the thread to set name. + * + * @param name + * The name to set after being prefixed. + * See RTE_THREAD_INTERNAL_NAME_SIZE for maximum length. + * The name of the driver or library should be first, + * then followed by a hyphen and more details. + * It will be prefixed with RTE_THREAD_INTERNAL_PREFIX by this function. + */ +__rte_internal +void +rte_thread_set_prefixed_name(rte_thread_t id, const char *name); + +/** * Check if 2 thread ids are equal. * * @param t1 @@ -211,13 +241,9 @@ rte_thread_set_name(rte_thread_t thread_id, const char *thread_name); * If the ids are equal, return nonzero. * Otherwise, return 0. */ -__rte_experimental int rte_thread_equal(rte_thread_t t1, rte_thread_t t2); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Initialize the attributes of a thread. * These attributes can be passed to the rte_thread_create() function * that will create a new thread and set its attributes according to attr. @@ -229,13 +255,9 @@ int rte_thread_equal(rte_thread_t t1, rte_thread_t t2); * On success, return 0. * On failure, return a positive errno-style error number. */ -__rte_experimental int rte_thread_attr_init(rte_thread_attr_t *attr); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Set the thread priority value in the thread attributes pointed to * by 'thread_attr'. * @@ -249,16 +271,12 @@ int rte_thread_attr_init(rte_thread_attr_t *attr); * On success, return 0. * On failure, return a positive errno-style error number. */ -__rte_experimental int rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr, enum rte_thread_priority priority); #ifdef RTE_HAS_CPUSET /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Set the CPU affinity value in the thread attributes pointed to * by 'thread_attr'. * @@ -272,14 +290,10 @@ int rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr, * On success, return 0. * On failure, return a positive errno-style error number. */ -__rte_experimental int rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr, rte_cpuset_t *cpuset); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Get the value of CPU affinity that is set in the thread attributes pointed * to by 'thread_attr'. * @@ -293,14 +307,10 @@ int rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr, * On success, return 0. * On failure, return a positive errno-style error number. */ -__rte_experimental int rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr, rte_cpuset_t *cpuset); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Set the affinity of thread 'thread_id' to the cpu set * specified by 'cpuset'. * @@ -314,14 +324,10 @@ int rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr, * On success, return 0. * On failure, return a positive errno-style error number. */ -__rte_experimental int rte_thread_set_affinity_by_id(rte_thread_t thread_id, const rte_cpuset_t *cpuset); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. 
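The internal control-thread helper declared above replaces the removed rte_ctrl_thread_create(); it is marked __rte_internal, so only DPDK libraries and drivers call it. A sketch of such a caller, with invented driver names; the thread name stays within RTE_THREAD_INTERNAL_NAME_SIZE and the helper adds the "dpdk-" prefix:

	#include <rte_thread.h>

	/* Invented example: a driver's monitor loop. rte_thread_func returns
	 * a uint32_t exit code that rte_thread_join() can collect.
	 */
	static uint32_t
	mydrv_monitor(void *arg)
	{
		(void)arg;
		/* ... poll the device until asked to stop ... */
		return 0;
	}

	static int
	mydrv_start_monitor(rte_thread_t *id)
	{
		return rte_thread_create_internal_control(id, "mydrv-mon",
				mydrv_monitor, NULL);
	}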
- * * Get the affinity of thread 'thread_id' and store it * in 'cpuset'. * @@ -335,7 +341,6 @@ int rte_thread_set_affinity_by_id(rte_thread_t thread_id, * On success, return 0. * On failure, return a positive errno-style error number. */ -__rte_experimental int rte_thread_get_affinity_by_id(rte_thread_t thread_id, rte_cpuset_t *cpuset); @@ -362,9 +367,6 @@ void rte_thread_get_affinity(rte_cpuset_t *cpusetp); #endif /* RTE_HAS_CPUSET */ /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Get the priority of a thread. * * @param thread_id @@ -377,14 +379,10 @@ void rte_thread_get_affinity(rte_cpuset_t *cpusetp); * On success, return 0. * On failure, return a positive errno-style error number. */ -__rte_experimental int rte_thread_get_priority(rte_thread_t thread_id, enum rte_thread_priority *priority); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Set the priority of a thread. * * @param thread_id @@ -397,7 +395,6 @@ int rte_thread_get_priority(rte_thread_t thread_id, * On success, return 0. * On failure, return a positive errno-style error number. */ -__rte_experimental int rte_thread_set_priority(rte_thread_t thread_id, enum rte_thread_priority priority); @@ -419,7 +416,6 @@ int rte_thread_set_priority(rte_thread_t thread_id, * ENOEXEC - Specific OS error. */ -__rte_experimental int rte_thread_key_create(rte_thread_key *key, void (*destructor)(void *)); @@ -435,7 +431,6 @@ int rte_thread_key_create(rte_thread_key *key, * rte_errno can be: EINVAL - Invalid parameter passed. * ENOEXEC - Specific OS error. */ -__rte_experimental int rte_thread_key_delete(rte_thread_key key); /** @@ -452,7 +447,6 @@ int rte_thread_key_delete(rte_thread_key key); * rte_errno can be: EINVAL - Invalid parameter passed. * ENOEXEC - Specific OS error. */ -__rte_experimental int rte_thread_value_set(rte_thread_key key, const void *value); /** @@ -467,7 +461,6 @@ int rte_thread_value_set(rte_thread_key key, const void *value); * rte_errno can be: EINVAL - Invalid parameter passed. * ENOEXEC - Specific OS error. */ -__rte_experimental void *rte_thread_value_get(rte_thread_key key); #ifdef __cplusplus diff --git a/lib/eal/include/rte_ticketlock.h b/lib/eal/include/rte_ticketlock.h index 5db0d8ae926..73884eb07bb 100644 --- a/lib/eal/include/rte_ticketlock.h +++ b/lib/eal/include/rte_ticketlock.h @@ -24,15 +24,16 @@ extern "C" { #include #include #include +#include /** * The rte_ticketlock_t type. 
*/ typedef union { - uint32_t tickets; + RTE_ATOMIC(uint32_t) tickets; struct { - uint16_t current; - uint16_t next; + RTE_ATOMIC(uint16_t) current; + RTE_ATOMIC(uint16_t) next; } s; } rte_ticketlock_t; @@ -50,7 +51,7 @@ typedef union { static inline void rte_ticketlock_init(rte_ticketlock_t *tl) { - __atomic_store_n(&tl->tickets, 0, __ATOMIC_RELAXED); + rte_atomic_store_explicit(&tl->tickets, 0, rte_memory_order_relaxed); } /** @@ -62,8 +63,9 @@ rte_ticketlock_init(rte_ticketlock_t *tl) static inline void rte_ticketlock_lock(rte_ticketlock_t *tl) { - uint16_t me = __atomic_fetch_add(&tl->s.next, 1, __ATOMIC_RELAXED); - rte_wait_until_equal_16(&tl->s.current, me, __ATOMIC_ACQUIRE); + uint16_t me = rte_atomic_fetch_add_explicit(&tl->s.next, 1, rte_memory_order_relaxed); + rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tl->s.current, me, + rte_memory_order_acquire); } /** @@ -75,8 +77,8 @@ rte_ticketlock_lock(rte_ticketlock_t *tl) static inline void rte_ticketlock_unlock(rte_ticketlock_t *tl) { - uint16_t i = __atomic_load_n(&tl->s.current, __ATOMIC_RELAXED); - __atomic_store_n(&tl->s.current, i + 1, __ATOMIC_RELEASE); + uint16_t i = rte_atomic_load_explicit(&tl->s.current, rte_memory_order_relaxed); + rte_atomic_store_explicit(&tl->s.current, i + 1, rte_memory_order_release); } /** @@ -91,12 +93,13 @@ static inline int rte_ticketlock_trylock(rte_ticketlock_t *tl) { rte_ticketlock_t oldl, newl; - oldl.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED); + oldl.tickets = rte_atomic_load_explicit(&tl->tickets, rte_memory_order_relaxed); newl.tickets = oldl.tickets; newl.s.next++; if (oldl.s.next == oldl.s.current) { - if (__atomic_compare_exchange_n(&tl->tickets, &oldl.tickets, - newl.tickets, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) + if (rte_atomic_compare_exchange_strong_explicit(&tl->tickets, + (uint32_t *)(uintptr_t)&oldl.tickets, newl.tickets, + rte_memory_order_acquire, rte_memory_order_relaxed)) return 1; } @@ -115,7 +118,7 @@ static inline int rte_ticketlock_is_locked(rte_ticketlock_t *tl) { rte_ticketlock_t tic; - tic.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_ACQUIRE); + tic.tickets = rte_atomic_load_explicit(&tl->tickets, rte_memory_order_acquire); return (tic.s.current != tic.s.next); } @@ -126,7 +129,7 @@ rte_ticketlock_is_locked(rte_ticketlock_t *tl) typedef struct { rte_ticketlock_t tl; /**< the actual ticketlock */ - int user; /**< core id using lock, TICKET_LOCK_INVALID_ID for unused */ + RTE_ATOMIC(int) user; /**< core id using lock, TICKET_LOCK_INVALID_ID for unused */ unsigned int count; /**< count of time this lock has been called */ } rte_ticketlock_recursive_t; @@ -146,7 +149,7 @@ static inline void rte_ticketlock_recursive_init(rte_ticketlock_recursive_t *tlr) { rte_ticketlock_init(&tlr->tl); - __atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID, __ATOMIC_RELAXED); + rte_atomic_store_explicit(&tlr->user, TICKET_LOCK_INVALID_ID, rte_memory_order_relaxed); tlr->count = 0; } @@ -161,9 +164,9 @@ rte_ticketlock_recursive_lock(rte_ticketlock_recursive_t *tlr) { int id = rte_gettid(); - if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) { + if (rte_atomic_load_explicit(&tlr->user, rte_memory_order_relaxed) != id) { rte_ticketlock_lock(&tlr->tl); - __atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED); + rte_atomic_store_explicit(&tlr->user, id, rte_memory_order_relaxed); } tlr->count++; } @@ -178,8 +181,8 @@ static inline void rte_ticketlock_recursive_unlock(rte_ticketlock_recursive_t *tlr) { if (--(tlr->count) == 0) { - __atomic_store_n(&tlr->user, 
TICKET_LOCK_INVALID_ID, - __ATOMIC_RELAXED); + rte_atomic_store_explicit(&tlr->user, TICKET_LOCK_INVALID_ID, + rte_memory_order_relaxed); rte_ticketlock_unlock(&tlr->tl); } } @@ -197,10 +200,10 @@ rte_ticketlock_recursive_trylock(rte_ticketlock_recursive_t *tlr) { int id = rte_gettid(); - if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) { + if (rte_atomic_load_explicit(&tlr->user, rte_memory_order_relaxed) != id) { if (rte_ticketlock_trylock(&tlr->tl) == 0) return 0; - __atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED); + rte_atomic_store_explicit(&tlr->user, id, rte_memory_order_relaxed); } tlr->count++; return 1; diff --git a/lib/eal/include/rte_trace_point.h b/lib/eal/include/rte_trace_point.h index c6b6fccda5d..41e2a7f99ed 100644 --- a/lib/eal/include/rte_trace_point.h +++ b/lib/eal/include/rte_trace_point.h @@ -28,11 +28,12 @@ extern "C" { #include #include #include +#include #include #include /** The tracepoint object. */ -typedef uint64_t rte_trace_point_t; +typedef RTE_ATOMIC(uint64_t) rte_trace_point_t; /** * Macro to define the tracepoint arguments in RTE_TRACE_POINT macro. @@ -358,7 +359,7 @@ __rte_trace_point_emit_ev_header(void *mem, uint64_t in) #define __rte_trace_point_emit_header_generic(t) \ void *mem; \ do { \ - const uint64_t val = __atomic_load_n(t, __ATOMIC_ACQUIRE); \ + const uint64_t val = rte_atomic_load_explicit(t, rte_memory_order_acquire); \ if (likely(!(val & __RTE_TRACE_FIELD_ENABLE_MASK))) \ return; \ mem = __rte_trace_mem_get(val); \ diff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c index ed50576d990..5f4b2fb0054 100644 --- a/lib/eal/linux/eal.c +++ b/lib/eal/linux/eal.c @@ -970,7 +970,7 @@ rte_eal_init(int argc, char **argv) static uint32_t run_once; uint32_t has_run = 0; char cpuset[RTE_CPU_AFFINITY_STR_LEN]; - char thread_name[RTE_MAX_THREAD_NAME_LEN]; + char thread_name[RTE_THREAD_NAME_SIZE]; bool phys_addrs; const struct rte_config *config = rte_eal_get_configuration(); struct internal_config *internal_conf = @@ -1253,7 +1253,7 @@ rte_eal_init(int argc, char **argv) /* Set thread_name for aid in debugging. 
*/ snprintf(thread_name, sizeof(thread_name), - "rte-worker-%d", i); + "dpdk-worker%d", i); rte_thread_set_name(lcore_config[i].thread_id, thread_name); ret = rte_thread_set_affinity_by_id(lcore_config[i].thread_id, diff --git a/lib/eal/linux/eal_alarm.c b/lib/eal/linux/eal_alarm.c index 397f25d7d58..766ba2c251c 100644 --- a/lib/eal/linux/eal_alarm.c +++ b/lib/eal/linux/eal_alarm.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/lib/eal/linux/eal_interrupts.c b/lib/eal/linux/eal_interrupts.c index c9881143be5..24fff3d3c26 100644 --- a/lib/eal/linux/eal_interrupts.c +++ b/lib/eal/linux/eal_interrupts.c @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include @@ -19,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -89,7 +89,7 @@ static union intr_pipefds intr_pipe; static struct rte_intr_source_list intr_sources; /* interrupt handling thread */ -static pthread_t intr_thread; +static rte_thread_t intr_thread; /* VFIO interrupts */ #ifdef VFIO_PRESENT @@ -1103,7 +1103,7 @@ eal_intr_handle_interrupts(int pfd, unsigned totalfds) * @return * never return; */ -static __rte_noreturn void * +static __rte_noreturn uint32_t eal_intr_thread_main(__rte_unused void *arg) { /* host thread, never break out */ @@ -1188,7 +1188,7 @@ rte_eal_intr_init(void) } /* create the host thread to wait/handle the interrupt */ - ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL, + ret = rte_thread_create_internal_control(&intr_thread, "intr", eal_intr_thread_main, NULL); if (ret != 0) { rte_errno = -ret; @@ -1601,5 +1601,5 @@ rte_intr_cap_multiple(struct rte_intr_handle *intr_handle) int rte_thread_is_intr(void) { - return pthread_equal(intr_thread, pthread_self()); + return rte_thread_equal(intr_thread, rte_thread_self()); } diff --git a/lib/eal/linux/eal_memalloc.c b/lib/eal/linux/eal_memalloc.c index f8b1588cae4..9853ec78a2b 100644 --- a/lib/eal/linux/eal_memalloc.c +++ b/lib/eal/linux/eal_memalloc.c @@ -1740,7 +1740,10 @@ eal_memalloc_init(void) eal_get_internal_configuration(); if (rte_eal_process_type() == RTE_PROC_SECONDARY) - if (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0) + /* memory_hotplug_lock is held during initialization, so it's + * safe to call thread-unsafe version. 
+ */ + if (rte_memseg_list_walk_thread_unsafe(secondary_msl_create_walk, NULL) < 0) return -1; if (rte_eal_process_type() == RTE_PROC_PRIMARY && internal_conf->in_memory) { @@ -1778,7 +1781,7 @@ eal_memalloc_init(void) } /* initialize all of the fd lists */ - if (rte_memseg_list_walk(fd_list_create_walk, NULL)) + if (rte_memseg_list_walk_thread_unsafe(fd_list_create_walk, NULL)) return -1; return 0; } diff --git a/lib/eal/linux/eal_thread.c b/lib/eal/linux/eal_thread.c index c07ad9d8a40..880070c6270 100644 --- a/lib/eal/linux/eal_thread.c +++ b/lib/eal/linux/eal_thread.c @@ -24,7 +24,7 @@ void rte_thread_set_name(rte_thread_t thread_id, const char *thread_name) int ret = ENOSYS; #if defined(__GLIBC__) && defined(__GLIBC_PREREQ) #if __GLIBC_PREREQ(2, 12) - char truncated[RTE_MAX_THREAD_NAME_LEN]; + char truncated[RTE_THREAD_NAME_SIZE]; const size_t truncatedsz = sizeof(truncated); if (strlcpy(truncated, thread_name, truncatedsz) >= truncatedsz) @@ -39,19 +39,3 @@ void rte_thread_set_name(rte_thread_t thread_id, const char *thread_name) if (ret != 0) RTE_LOG(DEBUG, EAL, "Failed to set thread name\n"); } - -int rte_thread_setname(pthread_t id, const char *name) -{ - int ret = ENOSYS; -#if defined(__GLIBC__) && defined(__GLIBC_PREREQ) -#if __GLIBC_PREREQ(2, 12) - char truncated[16]; - - strlcpy(truncated, name, sizeof(truncated)); - ret = pthread_setname_np(id, truncated); -#endif -#endif - RTE_SET_USED(id); - RTE_SET_USED(name); - return -ret; -} diff --git a/lib/eal/linux/eal_timer.c b/lib/eal/linux/eal_timer.c index 95c8cca9925..3a30284e3a5 100644 --- a/lib/eal/linux/eal_timer.c +++ b/lib/eal/linux/eal_timer.c @@ -14,6 +14,7 @@ #include #include +#include #include "eal_private.h" @@ -71,14 +72,14 @@ static uint64_t eal_hpet_resolution_hz = 0; /* Incremented 4 times during one 32bits hpet full count */ static uint32_t eal_hpet_msb; -static pthread_t msb_inc_thread_id; +static rte_thread_t msb_inc_thread_id; /* * This function runs on a specific thread to update a global variable * containing used to process MSB of the HPET (unfortunately, we need * this because hpet is 32 bits by default under linux). 
*/ -static void * +static uint32_t hpet_msb_inc(__rte_unused void *arg) { uint32_t t; @@ -89,7 +90,7 @@ hpet_msb_inc(__rte_unused void *arg) eal_hpet_msb ++; sleep(10); } - return NULL; + return 0; } uint64_t @@ -176,8 +177,8 @@ rte_eal_hpet_init(int make_default) /* create a thread that will increment a global variable for * msb (hpet is 32 bits by default under linux) */ - ret = rte_ctrl_thread_create(&msb_inc_thread_id, "hpet-msb-inc", NULL, - hpet_msb_inc, NULL); + ret = rte_thread_create_internal_control(&msb_inc_thread_id, "hpet-msb", + hpet_msb_inc, NULL); if (ret != 0) { RTE_LOG(ERR, EAL, "ERROR: Cannot create HPET timer thread!\n"); internal_conf->no_hpet = 1; diff --git a/lib/eal/linux/eal_vfio.c b/lib/eal/linux/eal_vfio.c index 56edccb0db2..ad3c1654b24 100644 --- a/lib/eal/linux/eal_vfio.c +++ b/lib/eal/linux/eal_vfio.c @@ -1682,7 +1682,7 @@ spapr_dma_win_size(void) RTE_LOG(DEBUG, EAL, "Setting DMA window size to 0x%" PRIx64 "\n", spapr_dma_win_len); spapr_dma_win_page_sz = param.page_sz; - rte_mem_set_dma_mask(__builtin_ctzll(spapr_dma_win_len)); + rte_mem_set_dma_mask(rte_ctz64(spapr_dma_win_len)); return 0; } @@ -1720,7 +1720,7 @@ vfio_spapr_create_dma_window(int vfio_container_fd) /* create a new DMA window (start address is not selectable) */ create.window_size = spapr_dma_win_len; - create.page_shift = __builtin_ctzll(spapr_dma_win_page_sz); + create.page_shift = rte_ctz64(spapr_dma_win_page_sz); create.levels = 1; ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create); #ifdef VFIO_IOMMU_SPAPR_INFO_DDW diff --git a/lib/eal/loongarch/include/rte_atomic.h b/lib/eal/loongarch/include/rte_atomic.h index 3c8284517e8..0510b8f7816 100644 --- a/lib/eal/loongarch/include/rte_atomic.h +++ b/lib/eal/loongarch/include/rte_atomic.h @@ -35,9 +35,9 @@ extern "C" { #define rte_io_rmb() rte_mb() static __rte_always_inline void -rte_atomic_thread_fence(int memorder) +rte_atomic_thread_fence(rte_memory_order memorder) { - __atomic_thread_fence(memorder); + __rte_atomic_thread_fence(memorder); } #ifdef __cplusplus diff --git a/lib/eal/loongarch/include/rte_cpuflags.h b/lib/eal/loongarch/include/rte_cpuflags.h index 1c80779262c..6b592c147cd 100644 --- a/lib/eal/loongarch/include/rte_cpuflags.h +++ b/lib/eal/loongarch/include/rte_cpuflags.h @@ -26,8 +26,6 @@ enum rte_cpu_flag_t { RTE_CPUFLAG_LBT_X86, RTE_CPUFLAG_LBT_ARM, RTE_CPUFLAG_LBT_MIPS, - /* The last item */ - RTE_CPUFLAG_NUMFLAGS /**< This should always be the last! 
*/ }; #include "generic/rte_cpuflags.h" diff --git a/lib/eal/loongarch/rte_cpuflags.c b/lib/eal/loongarch/rte_cpuflags.c index 0a75ca58d42..db9e28ef209 100644 --- a/lib/eal/loongarch/rte_cpuflags.c +++ b/lib/eal/loongarch/rte_cpuflags.c @@ -67,7 +67,7 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature) const struct feature_entry *feat; hwcap_registers_t regs = {0}; - if (feature >= RTE_CPUFLAG_NUMFLAGS) + if ((unsigned int)feature >= RTE_DIM(rte_cpu_feature_table)) return -ENOENT; feat = &rte_cpu_feature_table[feature]; @@ -81,7 +81,7 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature) const char * rte_cpu_get_flag_name(enum rte_cpu_flag_t feature) { - if (feature >= RTE_CPUFLAG_NUMFLAGS) + if ((unsigned int)feature >= RTE_DIM(rte_cpu_feature_table)) return NULL; return rte_cpu_feature_table[feature].name; } diff --git a/lib/eal/meson.build b/lib/eal/meson.build index 0fb974c78bf..99421043862 100644 --- a/lib/eal/meson.build +++ b/lib/eal/meson.build @@ -10,14 +10,6 @@ if not is_windows subdir('unix') endif -exec_envs = {'freebsd': 0, 'linux': 1, 'windows': 2} -foreach env, id:exec_envs - dpdk_conf.set('RTE_ENV_' + env.to_upper(), id) - dpdk_conf.set10('RTE_EXEC_ENV_IS_' + env.to_upper(), (exec_env == env)) -endforeach -dpdk_conf.set('RTE_EXEC_ENV', exec_envs[exec_env]) - -dpdk_conf.set('RTE_EXEC_ENV_' + exec_env.to_upper(), 1) subdir(exec_env) subdir(arch_subdir) diff --git a/lib/eal/ppc/include/rte_atomic.h b/lib/eal/ppc/include/rte_atomic.h index ec8d8a26d8e..73824120b91 100644 --- a/lib/eal/ppc/include/rte_atomic.h +++ b/lib/eal/ppc/include/rte_atomic.h @@ -38,9 +38,9 @@ extern "C" { #define rte_io_rmb() rte_rmb() static __rte_always_inline void -rte_atomic_thread_fence(int memorder) +rte_atomic_thread_fence(rte_memory_order memorder) { - __atomic_thread_fence(memorder); + __rte_atomic_thread_fence(memorder); } /*------------------------- 16 bit atomic operations -------------------------*/ @@ -48,8 +48,8 @@ rte_atomic_thread_fence(int memorder) static inline int rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src) { - return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE, - __ATOMIC_ACQUIRE) ? 1 : 0; + return __atomic_compare_exchange(dst, &exp, &src, 0, rte_memory_order_acquire, + rte_memory_order_acquire) ? 
1 : 0; } static inline int rte_atomic16_test_and_set(rte_atomic16_t *v) @@ -60,29 +60,29 @@ static inline int rte_atomic16_test_and_set(rte_atomic16_t *v) static inline void rte_atomic16_inc(rte_atomic16_t *v) { - __atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE); + rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_acquire); } static inline void rte_atomic16_dec(rte_atomic16_t *v) { - __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE); + rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_acquire); } static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v) { - return __atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE) + 1 == 0; + return rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_acquire) + 1 == 0; } static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v) { - return __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE) - 1 == 0; + return rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_acquire) - 1 == 0; } static inline uint16_t rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val) { - return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST); + return __atomic_exchange_2(dst, val, rte_memory_order_seq_cst); } /*------------------------- 32 bit atomic operations -------------------------*/ @@ -90,8 +90,8 @@ rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val) static inline int rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src) { - return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE, - __ATOMIC_ACQUIRE) ? 1 : 0; + return __atomic_compare_exchange(dst, &exp, &src, 0, rte_memory_order_acquire, + rte_memory_order_acquire) ? 1 : 0; } static inline int rte_atomic32_test_and_set(rte_atomic32_t *v) @@ -102,29 +102,29 @@ static inline int rte_atomic32_test_and_set(rte_atomic32_t *v) static inline void rte_atomic32_inc(rte_atomic32_t *v) { - __atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE); + rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_acquire); } static inline void rte_atomic32_dec(rte_atomic32_t *v) { - __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE); + rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_acquire); } static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v) { - return __atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE) + 1 == 0; + return rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_acquire) + 1 == 0; } static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v) { - return __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE) - 1 == 0; + return rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_acquire) - 1 == 0; } static inline uint32_t rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val) { - return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST); + return __atomic_exchange_4(dst, val, rte_memory_order_seq_cst); } /*------------------------- 64 bit atomic operations -------------------------*/ @@ -132,8 +132,8 @@ rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val) static inline int rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src) { - return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE, - __ATOMIC_ACQUIRE) ? 1 : 0; + return __atomic_compare_exchange(dst, &exp, &src, 0, rte_memory_order_acquire, + rte_memory_order_acquire) ? 
1 : 0; } static inline void @@ -157,47 +157,47 @@ rte_atomic64_set(rte_atomic64_t *v, int64_t new_value) static inline void rte_atomic64_add(rte_atomic64_t *v, int64_t inc) { - __atomic_fetch_add(&v->cnt, inc, __ATOMIC_ACQUIRE); + rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_acquire); } static inline void rte_atomic64_sub(rte_atomic64_t *v, int64_t dec) { - __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_ACQUIRE); + rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_acquire); } static inline void rte_atomic64_inc(rte_atomic64_t *v) { - __atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE); + rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_acquire); } static inline void rte_atomic64_dec(rte_atomic64_t *v) { - __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE); + rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_acquire); } static inline int64_t rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc) { - return __atomic_fetch_add(&v->cnt, inc, __ATOMIC_ACQUIRE) + inc; + return rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_acquire) + inc; } static inline int64_t rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec) { - return __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_ACQUIRE) - dec; + return rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_acquire) - dec; } static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v) { - return __atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE) + 1 == 0; + return rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_acquire) + 1 == 0; } static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v) { - return __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE) - 1 == 0; + return rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_acquire) - 1 == 0; } static inline int rte_atomic64_test_and_set(rte_atomic64_t *v) @@ -213,7 +213,7 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v) static inline uint64_t rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val) { - return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST); + return __atomic_exchange_8(dst, val, rte_memory_order_seq_cst); } #endif diff --git a/lib/eal/ppc/include/rte_byteorder.h b/lib/eal/ppc/include/rte_byteorder.h index 49f369f1e7b..de94e2ad321 100644 --- a/lib/eal/ppc/include/rte_byteorder.h +++ b/lib/eal/ppc/include/rte_byteorder.h @@ -62,15 +62,6 @@ static inline uint64_t rte_arch_bswap64(uint64_t _x) #define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ? \ rte_constant_bswap64(x) : \ rte_arch_bswap64(x))) -#else -/* - * __builtin_bswap16 is only available gcc 4.8 and upwards - */ -#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8) -#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ? \ - rte_constant_bswap16(x) : \ - rte_arch_bswap16(x))) -#endif #endif /* Power 8 have both little endian and big endian mode diff --git a/lib/eal/ppc/include/rte_cpuflags.h b/lib/eal/ppc/include/rte_cpuflags.h index a88355d170b..dedc1ab4691 100644 --- a/lib/eal/ppc/include/rte_cpuflags.h +++ b/lib/eal/ppc/include/rte_cpuflags.h @@ -48,8 +48,6 @@ enum rte_cpu_flag_t { RTE_CPUFLAG_DSCR, RTE_CPUFLAG_HTM, RTE_CPUFLAG_ARCH_2_07, - /* The last item */ - RTE_CPUFLAG_NUMFLAGS,/**< This should always be the last! 
*/ }; #include "generic/rte_cpuflags.h" diff --git a/lib/eal/ppc/rte_cpuflags.c b/lib/eal/ppc/rte_cpuflags.c index 61db5c216d7..d276c2cf88f 100644 --- a/lib/eal/ppc/rte_cpuflags.c +++ b/lib/eal/ppc/rte_cpuflags.c @@ -91,7 +91,7 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature) const struct feature_entry *feat; hwcap_registers_t regs = {0}; - if (feature >= RTE_CPUFLAG_NUMFLAGS) + if ((unsigned int)feature >= RTE_DIM(rte_cpu_feature_table)) return -ENOENT; feat = &rte_cpu_feature_table[feature]; @@ -105,7 +105,7 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature) const char * rte_cpu_get_flag_name(enum rte_cpu_flag_t feature) { - if (feature >= RTE_CPUFLAG_NUMFLAGS) + if ((unsigned int)feature >= RTE_DIM(rte_cpu_feature_table)) return NULL; return rte_cpu_feature_table[feature].name; } diff --git a/lib/eal/riscv/include/rte_atomic.h b/lib/eal/riscv/include/rte_atomic.h index 4b4633c9140..2603bc90ea1 100644 --- a/lib/eal/riscv/include/rte_atomic.h +++ b/lib/eal/riscv/include/rte_atomic.h @@ -40,9 +40,9 @@ extern "C" { #define rte_io_rmb() asm volatile("fence ir, ir" : : : "memory") static __rte_always_inline void -rte_atomic_thread_fence(int memorder) +rte_atomic_thread_fence(rte_memory_order memorder) { - __atomic_thread_fence(memorder); + __rte_atomic_thread_fence(memorder); } #ifdef __cplusplus diff --git a/lib/eal/riscv/include/rte_cpuflags.h b/lib/eal/riscv/include/rte_cpuflags.h index 66e787f8983..d742efc40ff 100644 --- a/lib/eal/riscv/include/rte_cpuflags.h +++ b/lib/eal/riscv/include/rte_cpuflags.h @@ -42,8 +42,6 @@ enum rte_cpu_flag_t { RTE_CPUFLAG_RISCV_ISA_X, /* Non-standard extension present */ RTE_CPUFLAG_RISCV_ISA_Y, /* Reserved */ RTE_CPUFLAG_RISCV_ISA_Z, /* Reserved */ - /* The last item */ - RTE_CPUFLAG_NUMFLAGS,/**< This should always be the last! 
*/ }; #include "generic/rte_cpuflags.h" diff --git a/lib/eal/riscv/rte_cpuflags.c b/lib/eal/riscv/rte_cpuflags.c index 4f6d29b9477..eb4105c18be 100644 --- a/lib/eal/riscv/rte_cpuflags.c +++ b/lib/eal/riscv/rte_cpuflags.c @@ -96,7 +96,7 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature) const struct feature_entry *feat; hwcap_registers_t regs = {0}; - if (feature >= RTE_CPUFLAG_NUMFLAGS) + if ((unsigned int)feature >= RTE_DIM(rte_cpu_feature_table)) return -ENOENT; feat = &rte_cpu_feature_table[feature]; @@ -110,7 +110,7 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature) const char * rte_cpu_get_flag_name(enum rte_cpu_flag_t feature) { - if (feature >= RTE_CPUFLAG_NUMFLAGS) + if ((unsigned int)feature >= RTE_DIM(rte_cpu_feature_table)) return NULL; return rte_cpu_feature_table[feature].name; } diff --git a/lib/eal/unix/eal_firmware.c b/lib/eal/unix/eal_firmware.c index d1616b0bd99..1a7cf8e7b76 100644 --- a/lib/eal/unix/eal_firmware.c +++ b/lib/eal/unix/eal_firmware.c @@ -25,19 +25,31 @@ static int firmware_open(struct firmware_read_ctx *ctx, const char *name, size_t blocksize) { struct archive_entry *e; + int err; ctx->a = archive_read_new(); if (ctx->a == NULL) return -1; - if (archive_read_support_format_raw(ctx->a) != ARCHIVE_OK || - archive_read_support_filter_xz(ctx->a) != ARCHIVE_OK || - archive_read_open_filename(ctx->a, name, blocksize) != ARCHIVE_OK || - archive_read_next_header(ctx->a, &e) != ARCHIVE_OK) { - archive_read_free(ctx->a); - ctx->a = NULL; - return -1; - } + + if (archive_read_support_format_raw(ctx->a) != ARCHIVE_OK) + goto error; + + err = archive_read_support_filter_xz(ctx->a); + if (err != ARCHIVE_OK && err != ARCHIVE_WARN) + goto error; + + if (archive_read_open_filename(ctx->a, name, blocksize) != ARCHIVE_OK) + goto error; + + if (archive_read_next_header(ctx->a, &e) != ARCHIVE_OK) + goto error; + return 0; + +error: + archive_read_free(ctx->a); + ctx->a = NULL; + return -1; } static ssize_t diff --git a/lib/eal/unix/rte_thread.c b/lib/eal/unix/rte_thread.c index f4076122a46..36a21ab2f96 100644 --- a/lib/eal/unix/rte_thread.c +++ b/lib/eal/unix/rte_thread.c @@ -190,7 +190,7 @@ rte_thread_create(rte_thread_t *thread_id, pthread_mutex_unlock(&ctx.wrapper_mutex); if (ret != 0) - pthread_join((pthread_t)thread_id->opaque_id, NULL); + rte_thread_join(*thread_id, NULL); cleanup: if (attrp != NULL) diff --git a/lib/eal/version.map b/lib/eal/version.map index 7940431e5ab..e00a844805b 100644 --- a/lib/eal/version.map +++ b/lib/eal/version.map @@ -20,7 +20,6 @@ DPDK_24 { rte_cpu_get_flag_enabled; rte_cpu_get_flag_name; rte_cpu_is_supported; # WINDOWS_NO_EXPORT - rte_ctrl_thread_create; rte_cycles_vmware_tsc_map; # WINDOWS_NO_EXPORT rte_delay_us; rte_delay_us_block; @@ -257,12 +256,30 @@ DPDK_24 { rte_strscpy; rte_strsplit; rte_sys_gettid; + rte_thread_attr_get_affinity; + rte_thread_attr_init; + rte_thread_attr_set_affinity; + rte_thread_attr_set_priority; + rte_thread_create; + rte_thread_create_control; + rte_thread_detach; + rte_thread_equal; rte_thread_get_affinity; + rte_thread_get_affinity_by_id; + rte_thread_get_priority; rte_thread_is_intr; + rte_thread_join; + rte_thread_key_create; + rte_thread_key_delete; rte_thread_register; + rte_thread_self; rte_thread_set_affinity; - rte_thread_setname; + rte_thread_set_affinity_by_id; + rte_thread_set_name; + rte_thread_set_priority; rte_thread_unregister; + rte_thread_value_get; + rte_thread_value_set; rte_uuid_compare; rte_uuid_is_null; rte_uuid_parse; @@ -368,10 +385,6 @@ EXPERIMENTAL { # added in 21.05 
rte_devargs_reset; rte_intr_callback_unregister_sync; - rte_thread_key_create; - rte_thread_key_delete; - rte_thread_value_get; - rte_thread_value_set; rte_version_minor; rte_version_month; rte_version_prefix; @@ -392,26 +405,9 @@ EXPERIMENTAL { # added in 22.07 rte_drand; - rte_thread_get_affinity_by_id; - rte_thread_get_priority; - rte_thread_self; - rte_thread_set_affinity_by_id; - rte_thread_set_priority; - - # added in 22.11 - rte_thread_attr_get_affinity; - rte_thread_attr_init; - rte_thread_attr_set_affinity; - rte_thread_attr_set_priority; - rte_thread_create; - rte_thread_detach; - rte_thread_equal; - rte_thread_join; # added in 23.03 rte_lcore_register_usage_cb; - rte_thread_create_control; - rte_thread_set_name; __rte_eal_trace_generic_blob; # added in 23.07 @@ -456,6 +452,7 @@ INTERNAL { rte_intr_vec_list_free; rte_intr_vec_list_index_get; rte_intr_vec_list_index_set; + rte_mcfg_ethdev_get_lock; rte_mcfg_mem_get_lock; rte_mcfg_mempool_get_lock; rte_mcfg_tailq_get_lock; @@ -464,4 +461,6 @@ INTERNAL { rte_mem_map; rte_mem_page_size; rte_mem_unmap; + rte_thread_create_internal_control; + rte_thread_set_prefixed_name; }; diff --git a/lib/eal/windows/eal.c b/lib/eal/windows/eal.c index 22dc7f6ee19..7ec21522115 100644 --- a/lib/eal/windows/eal.c +++ b/lib/eal/windows/eal.c @@ -283,7 +283,7 @@ rte_eal_init(int argc, char **argv) enum rte_iova_mode iova_mode; int ret; char cpuset[RTE_CPU_AFFINITY_STR_LEN]; - char thread_name[RTE_MAX_THREAD_NAME_LEN]; + char thread_name[RTE_THREAD_NAME_SIZE]; eal_log_init(NULL, 0); @@ -468,7 +468,7 @@ rte_eal_init(int argc, char **argv) /* Set thread name for aid in debugging. */ snprintf(thread_name, sizeof(thread_name), - "rte-worker-%d", i); + "dpdk-worker%d", i); rte_thread_set_name(lcore_config[i].thread_id, thread_name); ret = rte_thread_set_affinity_by_id(lcore_config[i].thread_id, diff --git a/lib/eal/windows/eal_interrupts.c b/lib/eal/windows/eal_interrupts.c index 49c4b9620af..49efdc098c1 100644 --- a/lib/eal/windows/eal_interrupts.c +++ b/lib/eal/windows/eal_interrupts.c @@ -98,7 +98,7 @@ rte_eal_intr_init(void) return -1; } - ret = rte_thread_create_control(&intr_thread, "eal-intr-thread", NULL, + ret = rte_thread_create_internal_control(&intr_thread, "intr", eal_intr_thread_main, NULL); if (ret != 0) { rte_errno = -ret; diff --git a/lib/eal/windows/eal_thread.c b/lib/eal/windows/eal_thread.c index 464d510838a..9e3df200b92 100644 --- a/lib/eal/windows/eal_thread.c +++ b/lib/eal/windows/eal_thread.c @@ -76,11 +76,3 @@ rte_sys_gettid(void) { return GetCurrentThreadId(); } - -int -rte_thread_setname(__rte_unused pthread_t id, __rte_unused const char *name) -{ - /* TODO */ - /* This is a stub, not the expected result */ - return 0; -} diff --git a/lib/eal/windows/rte_thread.c b/lib/eal/windows/rte_thread.c index e528ac99910..acf648456cf 100644 --- a/lib/eal/windows/rte_thread.c +++ b/lib/eal/windows/rte_thread.c @@ -324,7 +324,7 @@ void rte_thread_set_name(rte_thread_t thread_id, const char *thread_name) { int ret = 0; - wchar_t wname[RTE_MAX_THREAD_NAME_LEN]; + wchar_t wname[RTE_THREAD_NAME_SIZE]; mbstate_t state = {0}; size_t rv; HANDLE thread_handle; diff --git a/lib/eal/x86/include/rte_atomic.h b/lib/eal/x86/include/rte_atomic.h index f2ee1a9ce9c..f754423a81b 100644 --- a/lib/eal/x86/include/rte_atomic.h +++ b/lib/eal/x86/include/rte_atomic.h @@ -66,11 +66,15 @@ extern "C" { static __rte_always_inline void rte_smp_mb(void) { +#ifdef RTE_TOOLCHAIN_MSVC + _mm_mfence(); +#else #ifdef RTE_ARCH_I686 asm volatile("lock addl $0, -128(%%esp); " ::: 
"memory"); #else asm volatile("lock addl $0, -128(%%rsp); " ::: "memory"); #endif +#endif } #define rte_io_mb() rte_mb() @@ -79,20 +83,22 @@ rte_smp_mb(void) #define rte_io_rmb() rte_compiler_barrier() +#ifndef RTE_TOOLCHAIN_MSVC + /** * Synchronization fence between threads based on the specified memory order. * - * On x86 the __atomic_thread_fence(__ATOMIC_SEQ_CST) generates full 'mfence' + * On x86 the __rte_atomic_thread_fence(rte_memory_order_seq_cst) generates full 'mfence' * which is quite expensive. The optimized implementation of rte_smp_mb is * used instead. */ static __rte_always_inline void -rte_atomic_thread_fence(int memorder) +rte_atomic_thread_fence(rte_memory_order memorder) { - if (memorder == __ATOMIC_SEQ_CST) + if (memorder == rte_memory_order_seq_cst) rte_smp_mb(); else - __atomic_thread_fence(memorder); + __rte_atomic_thread_fence(memorder); } /*------------------------- 16 bit atomic operations -------------------------*/ @@ -275,6 +281,8 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v) #include "rte_atomic_64.h" #endif +#endif + #ifdef __cplusplus } #endif diff --git a/lib/eal/x86/include/rte_byteorder.h b/lib/eal/x86/include/rte_byteorder.h index a2dfecc1f55..adbec0c1576 100644 --- a/lib/eal/x86/include/rte_byteorder.h +++ b/lib/eal/x86/include/rte_byteorder.h @@ -18,6 +18,7 @@ extern "C" { #define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN #endif +#ifndef RTE_FORCE_INTRINSICS /* * An architecture-optimized byte swap for a 16-bit value. * @@ -47,7 +48,6 @@ static inline uint32_t rte_arch_bswap32(uint32_t _x) return x; } -#ifndef RTE_FORCE_INTRINSICS #define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ? \ rte_constant_bswap16(x) : \ rte_arch_bswap16(x))) @@ -59,14 +59,11 @@ static inline uint32_t rte_arch_bswap32(uint32_t _x) #define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ? \ rte_constant_bswap64(x) : \ rte_arch_bswap64(x))) + +#ifdef RTE_ARCH_I686 +#include "rte_byteorder_32.h" #else -/* - * __builtin_bswap16 is only available gcc 4.8 and upwards - */ -#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8) -#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ? \ - rte_constant_bswap16(x) : \ - rte_arch_bswap16(x))) +#include "rte_byteorder_64.h" #endif #endif @@ -86,12 +83,6 @@ static inline uint32_t rte_arch_bswap32(uint32_t _x) #define rte_be_to_cpu_32(x) rte_bswap32(x) #define rte_be_to_cpu_64(x) rte_bswap64(x) -#ifdef RTE_ARCH_I686 -#include "rte_byteorder_32.h" -#else -#include "rte_byteorder_64.h" -#endif - #ifdef __cplusplus } #endif diff --git a/lib/eal/x86/include/rte_cpuflags.h b/lib/eal/x86/include/rte_cpuflags.h index 92e90fb6e0f..1ee00e70fe7 100644 --- a/lib/eal/x86/include/rte_cpuflags.h +++ b/lib/eal/x86/include/rte_cpuflags.h @@ -133,9 +133,7 @@ enum rte_cpu_flag_t { RTE_CPUFLAG_AVX512VP2INTERSECT, /**< AVX512 Two Register Intersection */ RTE_CPUFLAG_WAITPKG, /**< UMONITOR/UMWAIT/TPAUSE */ - - /* The last item */ - RTE_CPUFLAG_NUMFLAGS, /**< This should always be the last! 
*/ + RTE_CPUFLAG_MONITORX, /**< MONITORX */ }; #include "generic/rte_cpuflags.h" diff --git a/lib/eal/x86/include/rte_cycles.h b/lib/eal/x86/include/rte_cycles.h index 23d664bda98..2afe85e28c6 100644 --- a/lib/eal/x86/include/rte_cycles.h +++ b/lib/eal/x86/include/rte_cycles.h @@ -6,6 +6,12 @@ #ifndef _RTE_CYCLES_X86_64_H_ #define _RTE_CYCLES_X86_64_H_ +#ifdef RTE_TOOLCHAIN_MSVC +#include +#else +#include +#endif + #ifdef __cplusplus extern "C" { #endif @@ -23,6 +29,7 @@ extern int rte_cycles_vmware_tsc_map; static inline uint64_t rte_rdtsc(void) { +#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT union { uint64_t tsc_64; struct { @@ -31,7 +38,6 @@ rte_rdtsc(void) }; } tsc; -#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT if (unlikely(rte_cycles_vmware_tsc_map)) { /* ecx = 0x10000 corresponds to the physical TSC for VMware */ asm volatile("rdpmc" : @@ -41,11 +47,7 @@ rte_rdtsc(void) return tsc.tsc_64; } #endif - - asm volatile("rdtsc" : - "=a" (tsc.lo_32), - "=d" (tsc.hi_32)); - return tsc.tsc_64; + return __rdtsc(); } static inline uint64_t diff --git a/lib/eal/x86/include/rte_prefetch.h b/lib/eal/x86/include/rte_prefetch.h index 7fd01c4913f..715c61b545e 100644 --- a/lib/eal/x86/include/rte_prefetch.h +++ b/lib/eal/x86/include/rte_prefetch.h @@ -9,39 +9,63 @@ extern "C" { #endif +#ifdef RTE_TOOLCHAIN_MSVC +#include +#endif + #include #include #include "generic/rte_prefetch.h" static inline void rte_prefetch0(const volatile void *p) { +#ifdef RTE_TOOLCHAIN_MSVC + _mm_prefetch((const void *)p, _MM_HINT_T0); +#else asm volatile ("prefetcht0 %[p]" : : [p] "m" (*(const volatile char *)p)); +#endif } static inline void rte_prefetch1(const volatile void *p) { +#ifdef RTE_TOOLCHAIN_MSVC + _mm_prefetch((const void *)p, _MM_HINT_T1); +#else asm volatile ("prefetcht1 %[p]" : : [p] "m" (*(const volatile char *)p)); +#endif } static inline void rte_prefetch2(const volatile void *p) { +#ifdef RTE_TOOLCHAIN_MSVC + _mm_prefetch((const void *)p, _MM_HINT_T2); +#else asm volatile ("prefetcht2 %[p]" : : [p] "m" (*(const volatile char *)p)); +#endif } static inline void rte_prefetch_non_temporal(const volatile void *p) { +#ifdef RTE_TOOLCHAIN_MSVC + _mm_prefetch((const void *)p, _MM_HINT_NTA); +#else asm volatile ("prefetchnta %[p]" : : [p] "m" (*(const volatile char *)p)); +#endif } -/* - * We use raw byte codes for now as only the newest compiler - * versions support this instruction natively. - */ __rte_experimental static inline void rte_cldemote(const volatile void *p) { +#ifdef RTE_TOOLCHAIN_MSVC + _mm_cldemote(p); +#else + /* + * We use raw byte codes for now as only the newest compiler + * versions support this instruction natively. + */ asm volatile(".byte 0x0f, 0x1c, 0x06" :: "S" (p)); +#endif } #ifdef __cplusplus diff --git a/lib/eal/x86/include/rte_rtm.h b/lib/eal/x86/include/rte_rtm.h index 36bf49846f0..b84e58e0592 100644 --- a/lib/eal/x86/include/rte_rtm.h +++ b/lib/eal/x86/include/rte_rtm.h @@ -5,6 +5,7 @@ #ifndef _RTE_RTM_H_ #define _RTE_RTM_H_ 1 +#include /* Official RTM intrinsics interface matching gcc/icc, but works on older gcc compatible compilers and binutils. 
*/ @@ -28,31 +29,22 @@ extern "C" { static __rte_always_inline unsigned int rte_xbegin(void) { - unsigned int ret = RTE_XBEGIN_STARTED; - - asm volatile(".byte 0xc7,0xf8 ; .long 0" : "+a" (ret) :: "memory"); - return ret; + return _xbegin(); } static __rte_always_inline void rte_xend(void) { - asm volatile(".byte 0x0f,0x01,0xd5" ::: "memory"); + _xend(); } /* not an inline function to workaround a clang bug with -O0 */ -#define rte_xabort(status) do { \ - asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory"); \ -} while (0) +#define rte_xabort(status) _xabort(status) static __rte_always_inline int rte_xtest(void) { - unsigned char out; - - asm volatile(".byte 0x0f,0x01,0xd6 ; setnz %0" : - "=r" (out) :: "memory"); - return out; + return _xtest(); } #ifdef __cplusplus diff --git a/lib/eal/x86/include/rte_spinlock.h b/lib/eal/x86/include/rte_spinlock.h index 0b20ddfd73d..a6c23ea1f64 100644 --- a/lib/eal/x86/include/rte_spinlock.h +++ b/lib/eal/x86/include/rte_spinlock.h @@ -78,7 +78,7 @@ static inline int rte_tm_supported(void) } static inline int -rte_try_tm(volatile int *lock) +rte_try_tm(volatile RTE_ATOMIC(int) *lock) { int i, retries; diff --git a/lib/eal/x86/include/rte_vect.h b/lib/eal/x86/include/rte_vect.h index 2e40b77da9b..560f9e4db38 100644 --- a/lib/eal/x86/include/rte_vect.h +++ b/lib/eal/x86/include/rte_vect.h @@ -16,20 +16,13 @@ #include #include "generic/rte_vect.h" -#if (defined(__ICC) || \ - (defined(_WIN64)) || \ - (__GNUC__ == 4 && __GNUC_MINOR__ < 4)) - +#if defined(__ICC) || defined(_WIN64) #include /* SSE4 */ - #if defined(__AVX__) #include #endif - #else - #include - #endif #ifdef __cplusplus diff --git a/lib/eal/x86/rte_cpuflags.c b/lib/eal/x86/rte_cpuflags.c index d6b518251b0..26163ab7463 100644 --- a/lib/eal/x86/rte_cpuflags.c +++ b/lib/eal/x86/rte_cpuflags.c @@ -133,6 +133,7 @@ const struct feature_entry rte_cpu_feature_table[] = { FEAT_DEF(LAHF_SAHF, 0x80000001, 0, RTE_REG_ECX, 0) FEAT_DEF(LZCNT, 0x80000001, 0, RTE_REG_ECX, 4) + FEAT_DEF(MONITORX, 0x80000001, 0, RTE_REG_ECX, 29) FEAT_DEF(SYSCALL, 0x80000001, 0, RTE_REG_EDX, 11) FEAT_DEF(XD, 0x80000001, 0, RTE_REG_EDX, 20) @@ -150,7 +151,7 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature) cpuid_registers_t regs; unsigned int maxleaf; - if (feature >= RTE_CPUFLAG_NUMFLAGS) + if ((unsigned int)feature >= RTE_DIM(rte_cpu_feature_table)) /* Flag does not match anything in the feature tables */ return -ENOENT; @@ -165,9 +166,13 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature) if (maxleaf < feat->leaf) return 0; - __cpuid_count(feat->leaf, feat->subleaf, +#ifdef RTE_TOOLCHAIN_MSVC + __cpuidex(regs, feat->leaf, feat->subleaf); +#else + __cpuid_count(feat->leaf, feat->subleaf, regs[RTE_REG_EAX], regs[RTE_REG_EBX], regs[RTE_REG_ECX], regs[RTE_REG_EDX]); +#endif /* check if the feature is enabled */ return (regs[feat->reg] >> feat->bit) & 1; @@ -176,7 +181,7 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature) const char * rte_cpu_get_flag_name(enum rte_cpu_flag_t feature) { - if (feature >= RTE_CPUFLAG_NUMFLAGS) + if ((unsigned int)feature >= RTE_DIM(rte_cpu_feature_table)) return NULL; return rte_cpu_feature_table[feature].name; } @@ -191,5 +196,7 @@ rte_cpu_get_intrinsics_support(struct rte_cpu_intrinsics *intrinsics) intrinsics->power_pause = 1; if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_RTM)) intrinsics->power_monitor_multi = 1; + } else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_MONITORX)) { + intrinsics->power_monitor = 1; } } diff --git a/lib/eal/x86/rte_cpuid.h b/lib/eal/x86/rte_cpuid.h 
index b773ad9312c..c6abaad13da 100644 --- a/lib/eal/x86/rte_cpuid.h +++ b/lib/eal/x86/rte_cpuid.h @@ -5,7 +5,9 @@ #ifndef RTE_CPUID_H #define RTE_CPUID_H +#ifndef RTE_TOOLCHAIN_MSVC #include +#endif enum cpu_register_t { RTE_REG_EAX = 0, @@ -16,4 +18,9 @@ enum cpu_register_t { typedef uint32_t cpuid_registers_t[4]; +#ifdef RTE_TOOLCHAIN_MSVC +int +__get_cpuid_max(unsigned int e, unsigned int *s); +#endif + #endif /* RTE_CPUID_H */ diff --git a/lib/eal/x86/rte_cycles.c b/lib/eal/x86/rte_cycles.c index 0e695caf28a..69ed59b4f00 100644 --- a/lib/eal/x86/rte_cycles.c +++ b/lib/eal/x86/rte_cycles.c @@ -4,7 +4,11 @@ #include #include +#ifdef RTE_TOOLCHAIN_MSVC +#define bit_AVX (1 << 28) +#else #include +#endif #include "eal_private.h" @@ -82,9 +86,25 @@ check_model_gdm_dnv(uint8_t model) return 0; } +#ifdef RTE_TOOLCHAIN_MSVC +int +__get_cpuid_max(unsigned int e, unsigned int *s) +{ + uint32_t cpuinfo[4]; + + __cpuid(cpuinfo, e); + if (s) + *s = cpuinfo[1]; + return cpuinfo[0]; +} +#endif + uint64_t get_tsc_freq_arch(void) { +#ifdef RTE_TOOLCHAIN_MSVC + int cpuinfo[4]; +#endif uint64_t tsc_hz = 0; uint32_t a, b, c, d, maxleaf; uint8_t mult, model; @@ -97,14 +117,30 @@ get_tsc_freq_arch(void) maxleaf = __get_cpuid_max(0, NULL); if (maxleaf >= 0x15) { +#ifdef RTE_TOOLCHAIN_MSVC + __cpuid(cpuinfo, 0x15); + a = cpuinfo[0]; + b = cpuinfo[1]; + c = cpuinfo[2]; + d = cpuinfo[3]; +#else __cpuid(0x15, a, b, c, d); +#endif /* EBX : TSC/Crystal ratio, ECX : Crystal Hz */ if (b && c) return c * (b / a); } +#ifdef RTE_TOOLCHAIN_MSVC + __cpuid(cpuinfo, 0x1); + a = cpuinfo[0]; + b = cpuinfo[1]; + c = cpuinfo[2]; + d = cpuinfo[3]; +#else __cpuid(0x1, a, b, c, d); +#endif model = rte_cpu_get_model(a); if (check_model_wsm_nhm(model)) diff --git a/lib/eal/x86/rte_hypervisor.c b/lib/eal/x86/rte_hypervisor.c index c38cfc09df8..04fe76751a0 100644 --- a/lib/eal/x86/rte_hypervisor.c +++ b/lib/eal/x86/rte_hypervisor.c @@ -23,9 +23,13 @@ rte_hypervisor_get(void) if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_HYPERVISOR)) return RTE_HYPERVISOR_NONE; +#ifdef RTE_TOOLCHAIN_MSVC + __cpuid(regs, HYPERVISOR_INFO_LEAF); +#else __cpuid(HYPERVISOR_INFO_LEAF, regs[RTE_REG_EAX], regs[RTE_REG_EBX], regs[RTE_REG_ECX], regs[RTE_REG_EDX]); +#endif for (reg = 1; reg < 4; reg++) memcpy(name + (reg - 1) * 4, &regs[reg], 4); name[12] = '\0'; diff --git a/lib/eal/x86/rte_power_intrinsics.c b/lib/eal/x86/rte_power_intrinsics.c index f749da9b851..483395dcd52 100644 --- a/lib/eal/x86/rte_power_intrinsics.c +++ b/lib/eal/x86/rte_power_intrinsics.c @@ -17,19 +17,94 @@ static struct power_wait_status { volatile void *monitor_addr; /**< NULL if not currently sleeping */ } __rte_cache_aligned wait_status[RTE_MAX_LCORE]; +/* + * This function uses UMONITOR/UMWAIT instructions and will enter C0.2 state. + * For more information about usage of these instructions, please refer to + * Intel(R) 64 and IA-32 Architectures Software Developer's Manual. + */ +static void intel_umonitor(volatile void *addr) +{ +#if defined(RTE_TOOLCHAIN_MSVC) || defined(__WAITPKG__) + /* cast away "volatile" when using the intrinsic */ + _umonitor((void *)(uintptr_t)addr); +#else + /* + * we're using raw byte codes for compiler versions which + * don't support this instruction natively.
+ */ + asm volatile(".byte 0xf3, 0x0f, 0xae, 0xf7;" + : + : "D"(addr)); +#endif +} + +static void intel_umwait(const uint64_t timeout) +{ + const uint32_t tsc_l = (uint32_t)timeout; + const uint32_t tsc_h = (uint32_t)(timeout >> 32); + +#if defined(RTE_TOOLCHAIN_MSVC) || defined(__WAITPKG__) + _umwait(tsc_l, tsc_h); +#else + asm volatile(".byte 0xf2, 0x0f, 0xae, 0xf7;" + : /* ignore rflags */ + : "D"(0), /* enter C0.2 */ + "a"(tsc_l), "d"(tsc_h)); +#endif +} + +/* + * This function uses MONITORX/MWAITX instructions and will enter C1 state. + * For more information about usage of these instructions, please refer to + * AMD64 Architecture Programmer’s Manual. + */ +static void amd_monitorx(volatile void *addr) +{ +#if defined(RTE_TOOLCHAIN_MSVC) || defined(__MWAITX__) + /* cast away "volatile" when using the intrinsic */ + _mm_monitorx((void *)(uintptr_t)addr, 0, 0); +#else + asm volatile(".byte 0x0f, 0x01, 0xfa;" + : + : "a"(addr), + "c"(0), /* no extensions */ + "d"(0)); /* no hints */ +#endif +} + +static void amd_mwaitx(const uint64_t timeout) +{ + RTE_SET_USED(timeout); +#if defined(RTE_TOOLCHAIN_MSVC) || defined(__MWAITX__) + _mm_mwaitx(0, 0, 0); +#else + asm volatile(".byte 0x0f, 0x01, 0xfb;" + : /* ignore rflags */ + : "a"(0), /* enter C1 */ + "c"(0)); /* no time-out */ +#endif +} + +static struct { + void (*mmonitor)(volatile void *addr); + void (*mwait)(const uint64_t timeout); +} __rte_cache_aligned power_monitor_ops; + static inline void __umwait_wakeup(volatile void *addr) { uint64_t val; /* trigger a write but don't change the value */ - val = __atomic_load_n((volatile uint64_t *)addr, __ATOMIC_RELAXED); - __atomic_compare_exchange_n((volatile uint64_t *)addr, &val, val, 0, - __ATOMIC_RELAXED, __ATOMIC_RELAXED); + val = rte_atomic_load_explicit((volatile __rte_atomic uint64_t *)addr, + rte_memory_order_relaxed); + rte_atomic_compare_exchange_strong_explicit((volatile __rte_atomic uint64_t *)addr, + &val, val, rte_memory_order_relaxed, rte_memory_order_relaxed); } static bool wait_supported; static bool wait_multi_supported; +static bool monitor_supported; static inline uint64_t __get_umwait_val(const volatile void *p, const uint8_t sz) @@ -74,14 +149,12 @@ int rte_power_monitor(const struct rte_power_monitor_cond *pmc, const uint64_t tsc_timestamp) { - const uint32_t tsc_l = (uint32_t)tsc_timestamp; - const uint32_t tsc_h = (uint32_t)(tsc_timestamp >> 32); const unsigned int lcore_id = rte_lcore_id(); struct power_wait_status *s; uint64_t cur_value; /* prevent user from running this instruction if it's not supported */ - if (!wait_supported) + if (!monitor_supported) return -ENOTSUP; /* prevent non-EAL thread from using this API */ @@ -103,15 +176,8 @@ rte_power_monitor(const struct rte_power_monitor_cond *pmc, rte_spinlock_lock(&s->lock); s->monitor_addr = pmc->addr; - /* - * we're using raw byte codes for now as only the newest compiler - * versions support this instruction natively. 
- */ - - /* set address for UMONITOR */ - asm volatile(".byte 0xf3, 0x0f, 0xae, 0xf7;" - : - : "D"(pmc->addr)); + /* set address for memory monitor */ + power_monitor_ops.mmonitor(pmc->addr); /* now that we've put this address into monitor, we can unlock */ rte_spinlock_unlock(&s->lock); @@ -122,11 +188,8 @@ rte_power_monitor(const struct rte_power_monitor_cond *pmc, if (pmc->fn(cur_value, pmc->opaque) != 0) goto end; - /* execute UMWAIT */ - asm volatile(".byte 0xf2, 0x0f, 0xae, 0xf7;" - : /* ignore rflags */ - : "D"(0), /* enter C0.2 */ - "a"(tsc_l), "d"(tsc_h)); + /* execute mwait */ + power_monitor_ops.mwait(tsc_timestamp); end: /* erase sleep address */ @@ -153,10 +216,14 @@ rte_power_pause(const uint64_t tsc_timestamp) return -ENOTSUP; /* execute TPAUSE */ +#if defined(RTE_TOOLCHAIN_MSVC) || defined(__WAITPKG__) + _tpause(tsc_l, tsc_h); +#else asm volatile(".byte 0x66, 0x0f, 0xae, 0xf7;" : /* ignore rflags */ : "D"(0), /* enter C0.2 */ "a"(tsc_l), "d"(tsc_h)); +#endif return 0; } @@ -170,6 +237,16 @@ RTE_INIT(rte_power_intrinsics_init) { wait_supported = 1; if (i.power_monitor_multi) wait_multi_supported = 1; + if (i.power_monitor) + monitor_supported = 1; + + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_MONITORX)) { + power_monitor_ops.mmonitor = &amd_monitorx; + power_monitor_ops.mwait = &amd_mwaitx; + } else { + power_monitor_ops.mmonitor = &intel_umonitor; + power_monitor_ops.mwait = &intel_umwait; + } } int @@ -178,7 +255,7 @@ rte_power_monitor_wakeup(const unsigned int lcore_id) struct power_wait_status *s; /* prevent user from running this instruction if it's not supported */ - if (!wait_supported) + if (!monitor_supported) return -ENOTSUP; /* prevent buffer overrun */ diff --git a/lib/ethdev/ethdev_driver.c b/lib/ethdev/ethdev_driver.c index 0be1e8ca042..fff4b7b4cdf 100644 --- a/lib/ethdev/ethdev_driver.c +++ b/lib/ethdev/ethdev_driver.c @@ -3,6 +3,7 @@ */ #include +#include #include #include @@ -44,6 +45,7 @@ eth_dev_allocated(const char *name) static uint16_t eth_dev_find_free_port(void) + __rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock()) { uint16_t i; @@ -60,6 +62,7 @@ eth_dev_find_free_port(void) static struct rte_eth_dev * eth_dev_get(uint16_t port_id) + __rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock()) { struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; @@ -86,10 +89,11 @@ rte_eth_dev_allocate(const char *name) return NULL; } - eth_dev_shared_data_prepare(); + /* Synchronize port creation between primary and secondary processes. */ + rte_spinlock_lock(rte_mcfg_ethdev_get_lock()); - /* Synchronize port creation between primary and secondary threads. 
*/ - rte_spinlock_lock(&eth_dev_shared_data->ownership_lock); + if (eth_dev_shared_data_prepare() == NULL) + goto unlock; if (eth_dev_allocated(name) != NULL) { RTE_ETHDEV_LOG(ERR, @@ -111,9 +115,11 @@ rte_eth_dev_allocate(const char *name) eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS; eth_dev->data->mtu = RTE_ETHER_MTU; pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL); + RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + eth_dev_shared_data->allocated_ports++; unlock: - rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock); + rte_spinlock_unlock(rte_mcfg_ethdev_get_lock()); return eth_dev; } @@ -123,13 +129,14 @@ rte_eth_dev_allocated(const char *name) { struct rte_eth_dev *ethdev; - eth_dev_shared_data_prepare(); - - rte_spinlock_lock(&eth_dev_shared_data->ownership_lock); + rte_spinlock_lock(rte_mcfg_ethdev_get_lock()); - ethdev = eth_dev_allocated(name); + if (eth_dev_shared_data_prepare() != NULL) + ethdev = eth_dev_allocated(name); + else + ethdev = NULL; - rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock); + rte_spinlock_unlock(rte_mcfg_ethdev_get_lock()); return ethdev; } @@ -145,10 +152,11 @@ rte_eth_dev_attach_secondary(const char *name) uint16_t i; struct rte_eth_dev *eth_dev = NULL; - eth_dev_shared_data_prepare(); - /* Synchronize port attachment to primary port creation and release. */ - rte_spinlock_lock(&eth_dev_shared_data->ownership_lock); + rte_spinlock_lock(rte_mcfg_ethdev_get_lock()); + + if (eth_dev_shared_data_prepare() == NULL) + goto unlock; for (i = 0; i < RTE_MAX_ETHPORTS; i++) { if (strcmp(eth_dev_shared_data->data[i].name, name) == 0) @@ -163,7 +171,8 @@ rte_eth_dev_attach_secondary(const char *name) RTE_ASSERT(eth_dev->data->port_id == i); } - rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock); +unlock: + rte_spinlock_unlock(rte_mcfg_ethdev_get_lock()); return eth_dev; } @@ -216,10 +225,19 @@ rte_eth_dev_probing_finish(struct rte_eth_dev *dev) int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) { + int ret; + if (eth_dev == NULL) return -EINVAL; - eth_dev_shared_data_prepare(); + rte_spinlock_lock(rte_mcfg_ethdev_get_lock()); + if (eth_dev_shared_data_prepare() == NULL) + ret = -EINVAL; + else + ret = 0; + rte_spinlock_unlock(rte_mcfg_ethdev_get_lock()); + if (ret != 0) + return ret; if (eth_dev->state != RTE_ETH_DEV_UNUSED) rte_eth_dev_callback_process(eth_dev, @@ -227,7 +245,7 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id); - rte_spinlock_lock(&eth_dev_shared_data->ownership_lock); + rte_spinlock_lock(rte_mcfg_ethdev_get_lock()); eth_dev->state = RTE_ETH_DEV_UNUSED; eth_dev->device = NULL; @@ -249,9 +267,13 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) rte_free(eth_dev->data->dev_private); pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex); memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data)); + eth_dev->data = NULL; + + eth_dev_shared_data->allocated_ports--; + eth_dev_shared_data_release(); } - rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock); + rte_spinlock_unlock(rte_mcfg_ethdev_get_lock()); return 0; } diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h index 980f837ab65..deb23ada181 100644 --- a/lib/ethdev/ethdev_driver.h +++ b/lib/ethdev/ethdev_driver.h @@ -18,6 +18,8 @@ extern "C" { * use them.
*/ +#include + #include #include #include @@ -58,6 +60,10 @@ struct rte_eth_dev { eth_rx_descriptor_status_t rx_descriptor_status; /** Check the status of a Tx descriptor */ eth_tx_descriptor_status_t tx_descriptor_status; + /** Pointer to PMD transmit mbufs reuse function */ + eth_recycle_tx_mbufs_reuse_t recycle_tx_mbufs_reuse; + /** Pointer to PMD receive descriptors refill function */ + eth_recycle_rx_descriptors_refill_t recycle_rx_descriptors_refill; /** * Device data that is shared between primary and secondary processes @@ -507,6 +513,10 @@ typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev, typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev, uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo); +typedef void (*eth_recycle_rxq_info_get_t)(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + struct rte_eth_recycle_rxq_info *recycle_rxq_info); + typedef int (*eth_burst_mode_get_t)(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_burst_mode *mode); @@ -1250,6 +1260,8 @@ struct eth_dev_ops { eth_rxq_info_get_t rxq_info_get; /** Retrieve Tx queue information */ eth_txq_info_get_t txq_info_get; + /** Retrieve mbufs recycle Rx queue information */ + eth_recycle_rxq_info_get_t recycle_rxq_info_get; eth_burst_mode_get_t rx_burst_mode_get; /**< Get Rx burst mode */ eth_burst_mode_get_t tx_burst_mode_get; /**< Get Tx burst mode */ eth_fw_version_get_t fw_version_get; /**< Get firmware version */ diff --git a/lib/ethdev/ethdev_private.c b/lib/ethdev/ethdev_private.c index 14ec8c6ccf3..7cc7f28296a 100644 --- a/lib/ethdev/ethdev_private.c +++ b/lib/ethdev/ethdev_private.c @@ -11,12 +11,9 @@ static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data"; -/* Shared memory between primary and secondary processes. */ +static const struct rte_memzone *eth_dev_shared_mz; struct eth_dev_shared *eth_dev_shared_data; -/* spinlock for shared data allocation */ -static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER; - /* spinlock for eth device callbacks */ rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER; @@ -277,6 +274,8 @@ eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo, fpo->rx_queue_count = dev->rx_queue_count; fpo->rx_descriptor_status = dev->rx_descriptor_status; fpo->tx_descriptor_status = dev->tx_descriptor_status; + fpo->recycle_tx_mbufs_reuse = dev->recycle_tx_mbufs_reuse; + fpo->recycle_rx_descriptors_refill = dev->recycle_rx_descriptors_refill; fpo->rxq.data = dev->data->rx_queues; fpo->rxq.clbk = (void **)(uintptr_t)dev->post_rx_burst_cbs; @@ -322,15 +321,13 @@ rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id, return nb_pkts; } -void +void * eth_dev_shared_data_prepare(void) { const unsigned int flags = 0; const struct rte_memzone *mz; - rte_spinlock_lock(&eth_dev_shared_data_lock); - - if (eth_dev_shared_data == NULL) { + if (eth_dev_shared_mz == NULL) { if (rte_eal_process_type() == RTE_PROC_PRIMARY) { /* Allocate port data and ownership shared memory.
*/ mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA, @@ -338,20 +335,39 @@ eth_dev_shared_data_prepare(void) rte_socket_id(), flags); } else mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA); - if (mz == NULL) - rte_panic("Cannot allocate ethdev shared data\n"); + if (mz == NULL) { + RTE_ETHDEV_LOG(ERR, "Cannot allocate ethdev shared data\n"); + goto out; + } + eth_dev_shared_mz = mz; eth_dev_shared_data = mz->addr; if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + eth_dev_shared_data->allocated_owners = 0; eth_dev_shared_data->next_owner_id = RTE_ETH_DEV_NO_OWNER + 1; - rte_spinlock_init(&eth_dev_shared_data->ownership_lock); + eth_dev_shared_data->allocated_ports = 0; memset(eth_dev_shared_data->data, 0, sizeof(eth_dev_shared_data->data)); } } +out: + return eth_dev_shared_data; +} + +void +eth_dev_shared_data_release(void) +{ + RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + + if (eth_dev_shared_data->allocated_ports != 0) + return; + if (eth_dev_shared_data->allocated_owners != 0) + return; - rte_spinlock_unlock(&eth_dev_shared_data_lock); + rte_memzone_free(eth_dev_shared_mz); + eth_dev_shared_mz = NULL; + eth_dev_shared_data = NULL; } void diff --git a/lib/ethdev/ethdev_private.h b/lib/ethdev/ethdev_private.h index acb4b335c81..0d36b9c30f7 100644 --- a/lib/ethdev/ethdev_private.h +++ b/lib/ethdev/ethdev_private.h @@ -7,18 +7,22 @@ #include +#include #include #include #include "rte_ethdev.h" struct eth_dev_shared { + uint64_t allocated_owners; uint64_t next_owner_id; - rte_spinlock_t ownership_lock; + uint64_t allocated_ports; struct rte_eth_dev_data data[RTE_MAX_ETHPORTS]; }; -extern struct eth_dev_shared *eth_dev_shared_data; +/* Shared memory between primary and secondary processes. */ +extern struct eth_dev_shared *eth_dev_shared_data + __rte_guarded_by(rte_mcfg_ethdev_get_lock()); /** * The user application callback description.
@@ -65,7 +69,10 @@ void eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo, const struct rte_eth_dev *dev); -void eth_dev_shared_data_prepare(void); +void *eth_dev_shared_data_prepare(void) + __rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock()); +void eth_dev_shared_data_release(void) + __rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock()); void eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid); void eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid); diff --git a/lib/ethdev/ethdev_trace.h b/lib/ethdev/ethdev_trace.h index 423e71236e0..e367d29c3ad 100644 --- a/lib/ethdev/ethdev_trace.h +++ b/lib/ethdev/ethdev_trace.h @@ -112,8 +112,9 @@ RTE_TRACE_POINT( RTE_TRACE_POINT( rte_ethdev_trace_owner_new, - RTE_TRACE_POINT_ARGS(uint64_t owner_id), + RTE_TRACE_POINT_ARGS(uint64_t owner_id, int ret), rte_trace_point_emit_u64(owner_id); + rte_trace_point_emit_int(ret); ) RTE_TRACE_POINT( @@ -377,10 +378,11 @@ RTE_TRACE_POINT( RTE_TRACE_POINT( rte_ethdev_trace_owner_get, RTE_TRACE_POINT_ARGS(uint16_t port_id, - const struct rte_eth_dev_owner *owner), + const struct rte_eth_dev_owner *owner, int ret), rte_trace_point_emit_u16(port_id); rte_trace_point_emit_u64(owner->id); rte_trace_point_emit_string(owner->name); + rte_trace_point_emit_int(ret); ) RTE_TRACE_POINT( diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index 0840d2b5942..9dabcb5ae28 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -388,7 +388,7 @@ rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id) static bool eth_dev_is_allocated(const struct rte_eth_dev *ethdev) { - return ethdev->data->name[0] != '\0'; + return ethdev->data != NULL && ethdev->data->name[0] != '\0'; } int @@ -409,6 +409,7 @@ rte_eth_dev_is_valid_port(uint16_t port_id) static int eth_is_valid_owner_id(uint64_t owner_id) + __rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock()) { if (owner_id == RTE_ETH_DEV_NO_OWNER || eth_dev_shared_data->next_owner_id <= owner_id) @@ -432,27 +433,34 @@ rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id) int rte_eth_dev_owner_new(uint64_t *owner_id) { + int ret; + if (owner_id == NULL) { RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n"); return -EINVAL; } - eth_dev_shared_data_prepare(); - - rte_spinlock_lock(ð_dev_shared_data->ownership_lock); + rte_spinlock_lock(rte_mcfg_ethdev_get_lock()); - *owner_id = eth_dev_shared_data->next_owner_id++; + if (eth_dev_shared_data_prepare() != NULL) { + *owner_id = eth_dev_shared_data->next_owner_id++; + eth_dev_shared_data->allocated_owners++; + ret = 0; + } else { + ret = -ENOMEM; + } - rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); + rte_spinlock_unlock(rte_mcfg_ethdev_get_lock()); - rte_ethdev_trace_owner_new(*owner_id); + rte_ethdev_trace_owner_new(*owner_id, ret); - return 0; + return ret; } static int eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id, const struct rte_eth_dev_owner *new_owner) + __rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock()) { struct rte_eth_dev *ethdev = &rte_eth_devices[port_id]; struct rte_eth_dev_owner *port_owner; @@ -503,13 +511,14 @@ rte_eth_dev_owner_set(const uint16_t port_id, { int ret; - eth_dev_shared_data_prepare(); + rte_spinlock_lock(rte_mcfg_ethdev_get_lock()); - rte_spinlock_lock(ð_dev_shared_data->ownership_lock); - - ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner); + if (eth_dev_shared_data_prepare() != NULL) + ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner); + else + ret = -ENOMEM; - 
rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); + rte_spinlock_unlock(rte_mcfg_ethdev_get_lock()); rte_ethdev_trace_owner_set(port_id, owner, ret); @@ -523,13 +532,14 @@ rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id) {.id = RTE_ETH_DEV_NO_OWNER, .name = ""}; int ret; - eth_dev_shared_data_prepare(); - - rte_spinlock_lock(ð_dev_shared_data->ownership_lock); + rte_spinlock_lock(rte_mcfg_ethdev_get_lock()); - ret = eth_dev_owner_set(port_id, owner_id, &new_owner); + if (eth_dev_shared_data_prepare() != NULL) + ret = eth_dev_owner_set(port_id, owner_id, &new_owner); + else + ret = -ENOMEM; - rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); + rte_spinlock_unlock(rte_mcfg_ethdev_get_lock()); rte_ethdev_trace_owner_unset(port_id, owner_id, ret); @@ -542,11 +552,11 @@ rte_eth_dev_owner_delete(const uint64_t owner_id) uint16_t port_id; int ret = 0; - eth_dev_shared_data_prepare(); + rte_spinlock_lock(rte_mcfg_ethdev_get_lock()); - rte_spinlock_lock(ð_dev_shared_data->ownership_lock); - - if (eth_is_valid_owner_id(owner_id)) { + if (eth_dev_shared_data_prepare() == NULL) { + ret = -ENOMEM; + } else if (eth_is_valid_owner_id(owner_id)) { for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) { struct rte_eth_dev_data *data = rte_eth_devices[port_id].data; @@ -557,6 +567,8 @@ rte_eth_dev_owner_delete(const uint64_t owner_id) RTE_ETHDEV_LOG(NOTICE, "All port owners owned by %016"PRIx64" identifier have removed\n", owner_id); + eth_dev_shared_data->allocated_owners--; + eth_dev_shared_data_release(); } else { RTE_ETHDEV_LOG(ERR, "Invalid owner ID=%016"PRIx64"\n", @@ -564,7 +576,7 @@ rte_eth_dev_owner_delete(const uint64_t owner_id) ret = -EINVAL; } - rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); + rte_spinlock_unlock(rte_mcfg_ethdev_get_lock()); rte_ethdev_trace_owner_delete(owner_id, ret); @@ -575,6 +587,7 @@ int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner) { struct rte_eth_dev *ethdev; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); ethdev = &rte_eth_devices[port_id]; @@ -591,15 +604,20 @@ rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner) return -EINVAL; } - eth_dev_shared_data_prepare(); + rte_spinlock_lock(rte_mcfg_ethdev_get_lock()); - rte_spinlock_lock(ð_dev_shared_data->ownership_lock); - rte_memcpy(owner, ðdev->data->owner, sizeof(*owner)); - rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); + if (eth_dev_shared_data_prepare() != NULL) { + rte_memcpy(owner, ðdev->data->owner, sizeof(*owner)); + ret = 0; + } else { + ret = -ENOMEM; + } - rte_ethdev_trace_owner_get(port_id, owner); + rte_spinlock_unlock(rte_mcfg_ethdev_get_lock()); - return 0; + rte_ethdev_trace_owner_get(port_id, owner, ret); + + return ret; } int @@ -675,9 +693,12 @@ rte_eth_dev_get_name_by_port(uint16_t port_id, char *name) return -EINVAL; } + rte_spinlock_lock(rte_mcfg_ethdev_get_lock()); /* shouldn't check 'rte_eth_devices[i].data', * because it might be overwritten by VDEV PMD */ tmp = eth_dev_shared_data->data[port_id].name; + rte_spinlock_unlock(rte_mcfg_ethdev_get_lock()); + strcpy(name, tmp); rte_ethdev_trace_get_name_by_port(port_id, name); @@ -688,6 +709,7 @@ rte_eth_dev_get_name_by_port(uint16_t port_id, char *name) int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) { + int ret = -ENODEV; uint16_t pid; if (name == NULL) { @@ -701,16 +723,19 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) return -EINVAL; } - RTE_ETH_FOREACH_VALID_DEV(pid) - if 
(!strcmp(name, eth_dev_shared_data->data[pid].name)) { - *port_id = pid; + rte_spinlock_lock(rte_mcfg_ethdev_get_lock()); + RTE_ETH_FOREACH_VALID_DEV(pid) { + if (strcmp(name, eth_dev_shared_data->data[pid].name) != 0) + continue; - rte_ethdev_trace_get_port_by_name(name, *port_id); - - return 0; - } + *port_id = pid; + rte_ethdev_trace_get_port_by_name(name, *port_id); + ret = 0; + break; + } + rte_spinlock_unlock(rte_mcfg_ethdev_get_lock()); - return -ENODEV; + return ret; } int @@ -1067,7 +1092,7 @@ eth_dev_offload_names(uint64_t bitmask, char *buf, size_t size, } while (bitmask != 0) { - uint64_t offload = RTE_BIT64(__builtin_ctzll(bitmask)); + uint64_t offload = RTE_BIT64(rte_ctz64(bitmask)); const char *name = offload_name(offload); ret = snprintf(&buf[pos], size - pos, "%s,", name); @@ -1165,7 +1190,7 @@ eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads, while (offloads_diff != 0) { /* Check if any offload is requested but not enabled. */ - offload = RTE_BIT64(__builtin_ctzll(offloads_diff)); + offload = RTE_BIT64(rte_ctz64(offloads_diff)); if (offload & req_offloads) { RTE_ETHDEV_LOG(ERR, "Port %u failed to enable %s offload %s\n", @@ -1292,6 +1317,25 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, /* Backup mtu for rollback */ old_mtu = dev->data->mtu; + /* fields must be zero to reserve them for future ABI changes */ + if (dev_conf->rxmode.reserved_64s[0] != 0 || + dev_conf->rxmode.reserved_64s[1] != 0 || + dev_conf->rxmode.reserved_ptrs[0] != NULL || + dev_conf->rxmode.reserved_ptrs[1] != NULL) { + RTE_ETHDEV_LOG(ERR, "Rxmode reserved fields not zero\n"); + ret = -EINVAL; + goto rollback; + } + + if (dev_conf->txmode.reserved_64s[0] != 0 || + dev_conf->txmode.reserved_64s[1] != 0 || + dev_conf->txmode.reserved_ptrs[0] != NULL || + dev_conf->txmode.reserved_ptrs[1] != NULL) { + RTE_ETHDEV_LOG(ERR, "txmode reserved fields not zero\n"); + ret = -EINVAL; + goto rollback; + } + ret = rte_eth_dev_info_get(port_id, &dev_info); if (ret != 0) goto rollback; @@ -2080,6 +2124,15 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, if (*dev->dev_ops->rx_queue_setup == NULL) return -ENOTSUP; + if (rx_conf != NULL && + (rx_conf->reserved_64s[0] != 0 || + rx_conf->reserved_64s[1] != 0 || + rx_conf->reserved_ptrs[0] != NULL || + rx_conf->reserved_ptrs[1] != NULL)) { + RTE_ETHDEV_LOG(ERR, "Rx conf reserved fields not zero\n"); + return -EINVAL; + } + ret = rte_eth_dev_info_get(port_id, &dev_info); if (ret != 0) return ret; @@ -2283,6 +2336,12 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, return -EINVAL; } + if (conf->reserved != 0) { + RTE_ETHDEV_LOG(ERR, + "Rx hairpin reserved field not zero\n"); + return -EINVAL; + } + ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); if (ret != 0) return ret; @@ -2378,6 +2437,15 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, if (*dev->dev_ops->tx_queue_setup == NULL) return -ENOTSUP; + if (tx_conf != NULL && + (tx_conf->reserved_64s[0] != 0 || + tx_conf->reserved_64s[1] != 0 || + tx_conf->reserved_ptrs[0] != NULL || + tx_conf->reserved_ptrs[1] != NULL)) { + RTE_ETHDEV_LOG(ERR, "Tx conf reserved fields not zero\n"); + return -EINVAL; + } + ret = rte_eth_dev_info_get(port_id, &dev_info); if (ret != 0) return ret; @@ -5876,6 +5944,28 @@ rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, return 0; } +int +rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, + struct rte_eth_recycle_rxq_info *recycle_rxq_info) +{ + struct 
rte_eth_dev *dev; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + dev = &rte_eth_devices[port_id]; + + ret = eth_dev_validate_rx_queue(dev, queue_id); + if (unlikely(ret != 0)) + return ret; + + if (*dev->dev_ops->recycle_rxq_info_get == NULL) + return -ENOTSUP; + + dev->dev_ops->recycle_rxq_info_get(dev, queue_id, recycle_rxq_info); + + return 0; +} + int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode) diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h index 04a2564f222..85b9af7a02c 100644 --- a/lib/ethdev/rte_ethdev.h +++ b/lib/ethdev/rte_ethdev.h @@ -1820,6 +1820,30 @@ struct rte_eth_txq_info { uint8_t queue_state; /**< one of RTE_ETH_QUEUE_STATE_*. */ } __rte_cache_min_aligned; +/** + * @warning + * @b EXPERIMENTAL: this structure may change without prior notice. + * + * Ethernet device Rx queue information structure for recycling mbufs. + * Used to retrieve Rx queue information when Tx queue reusing mbufs and moving + * them into Rx mbuf ring. + */ +struct rte_eth_recycle_rxq_info { + struct rte_mbuf **mbuf_ring; /**< mbuf ring of Rx queue. */ + struct rte_mempool *mp; /**< mempool of Rx queue. */ + uint16_t *refill_head; /**< head of Rx queue refilling mbufs. */ + uint16_t *receive_tail; /**< tail of Rx queue receiving pkts. */ + uint16_t mbuf_ring_size; /**< configured number of mbuf ring size. */ + /** + * Requirement on mbuf refilling batch size of Rx mbuf ring. + * For some PMD drivers, the number of Rx mbuf ring refilling mbufs + * should be aligned with mbuf ring size, in order to simplify + * ring wrapping around. + * Value 0 means that PMD drivers have no requirement for this. + */ + uint16_t refill_requirement; +} __rte_cache_min_aligned; + /* Generic Burst mode flag definition, values can be ORed. */ /** @@ -2038,7 +2062,6 @@ struct rte_eth_dev_owner { #define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1) /** Device is a bonding member */ #define RTE_ETH_DEV_BONDING_MEMBER RTE_BIT32(2) -#define RTE_ETH_DEV_BONDED_SLAVE RTE_DEPRECATED(RTE_ETH_DEV_BONDED_SLAVE) RTE_ETH_DEV_BONDING_MEMBER /** Device supports device removal interrupt */ #define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3) /** Device is port representor */ @@ -2789,6 +2812,9 @@ int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id); * Device RTE_ETH_DEV_NOLIVE_MAC_ADDR flag causes MAC address to be set before * PMD port start callback function is invoked. * + * All device queues (except form deferred start queues) status should be + * `RTE_ETH_QUEUE_STATE_STARTED` after start. + * * On success, all basic functions exported by the Ethernet API (link status, * receive/transmit, and so on) can be invoked. * @@ -2805,6 +2831,8 @@ int rte_eth_dev_start(uint16_t port_id); * Stop an Ethernet device. The device can be restarted with a call to * rte_eth_dev_start() * + * All device queues status should be `RTE_ETH_QUEUE_STATE_STOPPED` after stop. + * * @param port_id * The port identifier of the Ethernet device. * @return @@ -3882,6 +3910,10 @@ struct rte_eth_event_macsec_desc { * eth device. 
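As a usage note for the rte_eth_recycle_rxq_info structure and getter introduced above, a minimal sketch of how an application might probe an Rx queue before enabling mbuf recycling; error handling is abbreviated and the printed fields are those defined in the structure.

#include <stdio.h>
#include <rte_ethdev.h>

/* Illustrative: check whether (port_id, queue_id) can take part in mbuf recycling. */
static int
probe_recycle_rxq(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_recycle_rxq_info info;
	int ret;

	ret = rte_eth_recycle_rx_queue_info_get(port_id, queue_id, &info);
	if (ret != 0)
		return ret; /* e.g. -ENOTSUP when the PMD has no recycle support */

	printf("port %u rxq %u: mbuf ring size %u, refill requirement %u\n",
	       port_id, queue_id, info.mbuf_ring_size, info.refill_requirement);
	return 0;
}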
*/ enum rte_eth_event_ipsec_subtype { + /** PMD specific error start */ + RTE_ETH_EVENT_IPSEC_PMD_ERROR_START = -256, + /** PMD specific error end */ + RTE_ETH_EVENT_IPSEC_PMD_ERROR_END = -1, /** Unknown event type */ RTE_ETH_EVENT_IPSEC_UNKNOWN = 0, /** Sequence number overflow */ @@ -4853,6 +4885,31 @@ int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo);
+/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Retrieve information about a given port's Rx queue for recycling mbufs. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param queue_id + * The Rx queue on the Ethernet device for which information + * will be retrieved. + * @param recycle_rxq_info + * A pointer to a structure of type *rte_eth_recycle_rxq_info* to be filled. + * + * @return + * - 0: Success + * - -ENODEV: If *port_id* is invalid. + * - -ENOTSUP: routine is not supported by the device PMD. + * - -EINVAL: The queue_id is out of range. + */ +__rte_experimental +int rte_eth_recycle_rx_queue_info_get(uint16_t port_id, + uint16_t queue_id, + struct rte_eth_recycle_rxq_info *recycle_rxq_info); + /** * Retrieve information about the Rx packet burst mode. * @@ -6527,6 +6584,137 @@ rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, return rte_eth_tx_buffer_flush(port_id, queue_id, buffer); }
+/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Recycle used mbufs from a transmit queue of an Ethernet device, and move + * these mbufs into a mbuf ring for a receive queue of an Ethernet device. + * This can bypass the mempool path to save CPU cycles. + * + * The rte_eth_recycle_mbufs() function loops, with rte_eth_rx_burst() and + * rte_eth_tx_burst() functions, freeing used Tx mbufs and replenishing Rx + * descriptors. The number of recycled mbufs depends on how many the Rx mbuf + * ring requests, constrained by how many used mbufs are available in the Tx mbuf ring. + * + * For each recycled mbuf, the rte_eth_recycle_mbufs() function performs the + * following operations: + * + * - Copy used *rte_mbuf* buffer pointers from the Tx mbuf ring into the Rx mbuf ring. + * + * - Replenish the Rx descriptors with the recycled *rte_mbuf* mbufs freed + * from the Tx mbuf ring. + * + * This function splits the Rx and Tx paths into different callback functions: the + * recycle_tx_mbufs_reuse callback belongs to the Tx driver and the + * recycle_rx_descriptors_refill callback belongs to the Rx driver. rte_eth_recycle_mbufs() + * also supports the case where the Rx Ethernet device differs from the Tx Ethernet device. + * + * It is the responsibility of users to select the Rx/Tx queue pair on which to recycle + * mbufs. Before calling this function, users must call rte_eth_recycle_rxq_info_get() + * to retrieve the selected Rx queue information. + * @see rte_eth_recycle_rxq_info_get, struct rte_eth_recycle_rxq_info + * + * Currently, rte_eth_recycle_mbufs() can feed one Rx queue from up to two Tx queues + * within the same thread. Do not pair an Rx queue with a Tx queue polled from a different + * thread, in order to avoid memory corruption. + * + * @param rx_port_id + * Port identifying the receive side. + * @param rx_queue_id + * The index of the receive queue identifying the receive side. + * The value must be in the range [0, nb_rx_queue - 1] previously supplied + * to rte_eth_dev_configure(). 
+ * @param tx_port_id + * Port identifying the transmit side. + * @param tx_queue_id + * The index of the transmit queue identifying the transmit side. + * The value must be in the range [0, nb_tx_queue - 1] previously supplied + * to rte_eth_dev_configure(). + * @param recycle_rxq_info + * A pointer to a structure of type *rte_eth_recycle_rxq_info* which contains + * the information of the Rx queue mbuf ring. + * @return + * The number of recycling mbufs. + */ +__rte_experimental +static inline uint16_t +rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id, + uint16_t tx_port_id, uint16_t tx_queue_id, + struct rte_eth_recycle_rxq_info *recycle_rxq_info) +{ + struct rte_eth_fp_ops *p1, *p2; + void *qd1, *qd2; + uint16_t nb_mbufs; + +#ifdef RTE_ETHDEV_DEBUG_TX + if (tx_port_id >= RTE_MAX_ETHPORTS || + tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) { + RTE_ETHDEV_LOG(ERR, + "Invalid tx_port_id=%u or tx_queue_id=%u\n", + tx_port_id, tx_queue_id); + return 0; + } +#endif + + /* fetch pointer to Tx queue data */ + p1 = &rte_eth_fp_ops[tx_port_id]; + qd1 = p1->txq.data[tx_queue_id]; + +#ifdef RTE_ETHDEV_DEBUG_TX + RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0); + + if (qd1 == NULL) { + RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n", + tx_queue_id, tx_port_id); + return 0; + } +#endif + if (p1->recycle_tx_mbufs_reuse == NULL) + return 0; + +#ifdef RTE_ETHDEV_DEBUG_RX + if (rx_port_id >= RTE_MAX_ETHPORTS || + rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) { + RTE_ETHDEV_LOG(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u\n", + rx_port_id, rx_queue_id); + return 0; + } +#endif + + /* fetch pointer to Rx queue data */ + p2 = &rte_eth_fp_ops[rx_port_id]; + qd2 = p2->rxq.data[rx_queue_id]; + +#ifdef RTE_ETHDEV_DEBUG_RX + RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0); + + if (qd2 == NULL) { + RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n", + rx_queue_id, rx_port_id); + return 0; + } +#endif + if (p2->recycle_rx_descriptors_refill == NULL) + return 0; + + /* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring + * into Rx mbuf ring. + */ + nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info); + + /* If no recycling mbufs, return 0. */ + if (nb_mbufs == 0) + return 0; + + /* Replenish the Rx descriptors with the recycling + * into Rx mbuf ring. + */ + p2->recycle_rx_descriptors_refill(qd2, nb_mbufs); + + return nb_mbufs; +} + /** * @warning * @b EXPERIMENTAL: this API may change without prior notice diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h index 46e9721e07c..32f5f7376a2 100644 --- a/lib/ethdev/rte_ethdev_core.h +++ b/lib/ethdev/rte_ethdev_core.h @@ -5,8 +5,6 @@ #ifndef _RTE_ETHDEV_CORE_H_ #define _RTE_ETHDEV_CORE_H_ -#include - /** * @file * @@ -55,6 +53,13 @@ typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset); /** @internal Check the status of a Tx descriptor */ typedef int (*eth_tx_descriptor_status_t)(void *txq, uint16_t offset); +/** @internal Copy used mbufs from Tx mbuf ring into Rx mbuf ring */ +typedef uint16_t (*eth_recycle_tx_mbufs_reuse_t)(void *txq, + struct rte_eth_recycle_rxq_info *recycle_rxq_info); + +/** @internal Refill Rx descriptors with the recycling mbufs */ +typedef void (*eth_recycle_rx_descriptors_refill_t)(void *rxq, uint16_t nb); + /** * @internal * Structure used to hold opaque pointers to internal ethdev Rx/Tx @@ -83,15 +88,17 @@ struct rte_eth_fp_ops { * Rx fast-path functions and related data. * 64-bit systems: occupies first 64B line */ + /** Rx queues data. 
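To illustrate the fast-path call sequence described above, a minimal sketch of a single-threaded forwarding loop that recycles Tx-completed mbufs into the Rx mbuf ring; port and queue setup are assumed to have been done already, and the loop falls back to the normal mempool path when recycling is unsupported.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

/* Illustrative forwarding loop: Rx on (rx_port, rx_q), Tx on (tx_port, tx_q),
 * recycling Tx-completed mbufs straight into the Rx mbuf ring.
 * Assumes both queues are configured, started and polled by this thread only. */
static void
recycle_fwd_loop(uint16_t rx_port, uint16_t rx_q, uint16_t tx_port, uint16_t tx_q)
{
	struct rte_eth_recycle_rxq_info rxq_info;
	struct rte_mbuf *pkts[BURST_SIZE];
	uint16_t nb_rx, nb_tx;

	if (rte_eth_recycle_rx_queue_info_get(rx_port, rx_q, &rxq_info) != 0)
		return; /* fall back to the regular mempool path */

	for (;;) {
		/* Move used mbufs from the Tx ring into the Rx mbuf ring. */
		rte_eth_recycle_mbufs(rx_port, rx_q, tx_port, tx_q, &rxq_info);

		nb_rx = rte_eth_rx_burst(rx_port, rx_q, pkts, BURST_SIZE);
		if (nb_rx == 0)
			continue;

		nb_tx = rte_eth_tx_burst(tx_port, tx_q, pkts, nb_rx);
		/* Free anything the Tx queue did not accept. */
		while (nb_tx < nb_rx)
			rte_pktmbuf_free(pkts[nb_tx++]);
	}
}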
*/ + struct rte_ethdev_qdata rxq; /** PMD receive function. */ eth_rx_burst_t rx_pkt_burst; /** Get the number of used Rx descriptors. */ eth_rx_queue_count_t rx_queue_count; /** Check the status of a Rx descriptor. */ eth_rx_descriptor_status_t rx_descriptor_status; - /** Rx queues data. */ - struct rte_ethdev_qdata rxq; - uintptr_t reserved1[3]; + /** Refill Rx descriptors with the recycling mbufs. */ + eth_recycle_rx_descriptors_refill_t recycle_rx_descriptors_refill; + uintptr_t reserved1[2]; /**@}*/ /**@{*/ @@ -99,15 +106,17 @@ struct rte_eth_fp_ops { * Tx fast-path functions and related data. * 64-bit systems: occupies second 64B line */ + /** Tx queues data. */ + struct rte_ethdev_qdata txq; /** PMD transmit function. */ eth_tx_burst_t tx_pkt_burst; /** PMD transmit prepare function. */ eth_tx_prep_t tx_pkt_prepare; /** Check the status of a Tx descriptor. */ eth_tx_descriptor_status_t tx_descriptor_status; - /** Tx queues data. */ - struct rte_ethdev_qdata txq; - uintptr_t reserved2[3]; + /** Copy used mbufs from Tx mbuf ring into Rx. */ + eth_recycle_tx_mbufs_reuse_t recycle_tx_mbufs_reuse; + uintptr_t reserved2[2]; /**@}*/ } __rte_cache_aligned; diff --git a/lib/ethdev/rte_flow.c b/lib/ethdev/rte_flow.c index 271d854f780..3a67f1aaba9 100644 --- a/lib/ethdev/rte_flow.c +++ b/lib/ethdev/rte_flow.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -166,6 +167,7 @@ static const struct rte_flow_desc_data rte_flow_desc_item[] = { MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)), MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)), MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)), + MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)), }; /** Generate flow_action[] entry. */ @@ -1973,6 +1975,28 @@ rte_flow_template_table_destroy(uint16_t port_id, NULL, rte_strerror(ENOTSUP)); } +int +rte_flow_group_set_miss_actions(uint16_t port_id, + uint32_t group_id, + const struct rte_flow_group_attr *attr, + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); + + if (unlikely(!ops)) + return -rte_errno; + if (likely(!!ops->group_set_miss_actions)) { + return flow_err(port_id, + ops->group_set_miss_actions(dev, group_id, attr, actions, error), + error); + } + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, rte_strerror(ENOTSUP)); +} + struct rte_flow * rte_flow_async_create(uint16_t port_id, uint32_t queue_id, @@ -2202,6 +2226,8 @@ rte_flow_async_action_handle_query(uint16_t port_id, const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); int ret; + if (unlikely(!ops)) + return -rte_errno; ret = ops->async_action_handle_query(dev, queue_id, op_attr, action_handle, data, user_data, error); ret = flow_err(port_id, ret, error); @@ -2431,3 +2457,24 @@ rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_ ret); return ret; } + +int +rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table, + const struct rte_flow_item pattern[], uint8_t pattern_template_index, + uint32_t *hash, struct rte_flow_error *error) +{ + int ret; + struct rte_eth_dev *dev; + const struct rte_flow_ops *ops; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + ops = rte_flow_ops_get(port_id, error); + if (!ops || !ops->flow_calc_table_hash) + return rte_flow_error_set(error, ENOTSUP, + 
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "action_list async query_update not supported"); + dev = &rte_eth_devices[port_id]; + ret = ops->flow_calc_table_hash(dev, table, pattern, pattern_template_index, + hash, error); + return flow_err(port_id, ret, error); +} diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h index 2ebb76dbc08..edefa34c10d 100644 --- a/lib/ethdev/rte_flow.h +++ b/lib/ethdev/rte_flow.h @@ -129,6 +129,12 @@ struct rte_flow_attr { uint32_t reserved:29; /**< Reserved, must be zero. */ }; +struct rte_flow_group_attr { + uint32_t ingress:1; + uint32_t egress:1; + uint32_t transfer:1; +}; + /** * Matching pattern item types. * @@ -688,6 +694,14 @@ enum rte_flow_item_type { * @see struct rte_flow_item_ib_bth. */ RTE_FLOW_ITEM_TYPE_IB_BTH, + + /** + * Matches the packet type as defined in rte_mbuf_ptype. + * + * See struct rte_flow_item_ptype. + * + */ + RTE_FLOW_ITEM_TYPE_PTYPE, }; /** @@ -2303,6 +2317,23 @@ static const struct rte_flow_item_tx_queue rte_flow_item_tx_queue_mask = { }; #endif +/** + * + * RTE_FLOW_ITEM_TYPE_PTYPE + * + * Matches the packet type as defined in rte_mbuf_ptype. + */ +struct rte_flow_item_ptype { + uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */ +}; + +/** Default mask for RTE_FLOW_ITEM_TYPE_PTYPE. */ +#ifndef __cplusplus +static const struct rte_flow_item_ptype rte_flow_item_ptype_mask = { + .packet_type = 0xffffffff, +}; +#endif + /** * Action types. * @@ -2931,7 +2962,6 @@ enum rte_flow_action_type { * The packets will be received by the kernel driver sharing * the same device as the DPDK port on which this action is configured. * This action mostly suits bifurcated driver model. - * This is an ingress non-transfer action only. * * No associated configuration structure. */ @@ -2981,6 +3011,15 @@ enum rte_flow_action_type { * @see struct rte_flow_action_indirect_list */ RTE_FLOW_ACTION_TYPE_INDIRECT_LIST, + + /** + * Program action. These actions are defined by the program currently + * loaded on the device. For example, these actions are applicable to + * devices that can be programmed through the P4 language. + * + * @see struct rte_flow_action_prog. + */ + RTE_FLOW_ACTION_TYPE_PROG, }; /** @@ -3196,6 +3235,13 @@ enum rte_eth_hash_function { * src or dst address will xor with zero pair. */ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ, + /** + * Symmetric Toeplitz: L3 and L4 fields are sorted prior to + * the hash function. + * If src_ip > dst_ip, swap src_ip and dst_ip. + * If src_port > dst_port, swap src_port and dst_port. + */ + RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT, RTE_ETH_HASH_FUNCTION_MAX, }; @@ -3875,6 +3921,10 @@ enum rte_flow_field_id { RTE_FLOW_FIELD_GENEVE_OPT_CLASS,/**< GENEVE option class. */ RTE_FLOW_FIELD_GENEVE_OPT_DATA, /**< GENEVE option data. */ RTE_FLOW_FIELD_MPLS, /**< MPLS header. */ + RTE_FLOW_FIELD_TCP_DATA_OFFSET, /**< TCP data offset. */ + RTE_FLOW_FIELD_IPV4_IHL, /**< IPv4 IHL. */ + RTE_FLOW_FIELD_IPV4_TOTAL_LEN, /**< IPv4 total length. */ + RTE_FLOW_FIELD_IPV6_PAYLOAD_LEN /**< IPv6 payload length. */ }; /** @@ -4016,8 +4066,6 @@ struct rte_flow_action_meter_mark { struct rte_flow_meter_policy *policy; /** Metering mode: 0 - Color-Blind, 1 - Color-Aware. */ int color_mode; - /** Initial Color applied to packets in Color-Aware mode. */ - enum rte_color init_color; /** Metering state: 0 - Disabled, 1 - Enabled. */ int state; }; @@ -4036,12 +4084,10 @@ struct rte_flow_update_meter_mark { uint32_t policy_valid:1; /** The color mode will be updated. 
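Returning to the RTE_FLOW_ITEM_TYPE_PTYPE item introduced above, a minimal sketch of a pattern that matches packets classified as TCP; attributes and actions are application specific and omitted here.

#include <rte_flow.h>
#include <rte_mbuf_ptype.h>

/* Illustrative pattern matching packets whose classified type is TCP. */
static void
build_ptype_pattern(struct rte_flow_item pattern[3])
{
	static const struct rte_flow_item_ptype spec = {
		.packet_type = RTE_PTYPE_L4_TCP,
	};
	static const struct rte_flow_item_ptype mask = {
		.packet_type = RTE_PTYPE_L4_MASK,
	};

	pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
	pattern[1] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_PTYPE,
		.spec = &spec,
		.mask = &mask,
	};
	pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
}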
*/ uint32_t color_mode_valid:1; - /** The initial color will be updated. */ - uint32_t init_color_valid:1; /** The meter state will be updated. */ uint32_t state_valid:1; /** Reserved bits for the future usage. */ - uint32_t reserved:27; + uint32_t reserved:28; }; /** @@ -4055,6 +4101,48 @@ struct rte_flow_indirect_update_flow_meter_mark { enum rte_color init_color; }; +/** + * @warning + * @b EXPERIMENTAL: this structure may change without prior notice. + * + * Program action argument configuration parameters. + * + * For each action argument, its *size* must be non-zero and its *value* must + * point to a valid array of *size* bytes specified in network byte order. + * + * @see struct rte_flow_action_prog + */ +struct rte_flow_action_prog_argument { + /** Argument name. */ + const char *name; + /** Argument size in bytes. */ + uint32_t size; + /** Argument value. */ + const uint8_t *value; +}; + +/** + * @warning + * @b EXPERIMENTAL: this structure may change without prior notice. + * + * RTE_FLOW_ACTION_TYPE_PROG + * + * Program action configuration parameters. + * + * Each action can have zero or more arguments. When *args_num* is non-zero, the + * *args* parameter must point to a valid array of *args_num* elements. + * + * @see RTE_FLOW_ACTION_TYPE_PROG + */ +struct rte_flow_action_prog { + /** Action name. */ + const char *name; + /** Number of action arguments. */ + uint32_t args_num; + /** Action arguments array. */ + const struct rte_flow_action_prog_argument *args; +}; + /* Mbuf dynamic field offset for metadata. */ extern int32_t rte_flow_dynf_metadata_offs; @@ -5828,6 +5916,35 @@ rte_flow_template_table_destroy(uint16_t port_id, struct rte_flow_template_table *template_table, struct rte_flow_error *error); +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Set group miss actions. + * + * @param port_id + * Port identifier of Ethernet device. + * @param group_id + * Identifier of a group to set miss actions for. + * @param attr + * Group attributes. + * @param actions + * List of group miss actions. + * @param[out] error + * Perform verbose error reporting if not NULL. + * PMDs initialize this structure in case of error only. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +__rte_experimental +int +rte_flow_group_set_miss_actions(uint16_t port_id, + uint32_t group_id, + const struct rte_flow_group_attr *attr, + const struct rte_flow_action actions[], + struct rte_flow_error *error); + /** * @warning * @b EXPERIMENTAL: this API may change without prior notice. @@ -6625,6 +6742,38 @@ rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_ void *user_data, struct rte_flow_error *error); +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Calculate the hash for a given pattern in a given table as + * calculated by the HW. + * + * @param port_id + * Port identifier of Ethernet device. + * @param table + * The table the SW wishes to simulate. + * @param pattern + * The values to be used in the hash calculation. + * @param pattern_template_index + * The pattern index in the table to be used for the calculation. + * @param hash + * Used to return the calculated hash. + * @param error + * Perform verbose error reporting if not NULL. + * PMDs initialize this structure in case of error only. + * + * @return + * - (0) if success. + * - (-ENODEV) if *port_id* invalid. + * - (-ENOTSUP) if underlying device does not support this functionality. 
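As a usage sketch for the rte_flow_group_set_miss_actions() API declared above: the group numbers are illustrative, and a jump action is just one possible miss behaviour.

#include <rte_flow.h>

/* Illustrative: packets that miss every rule in ingress group 1 jump to group 2
 * instead of taking the port's default behaviour. */
static int
set_group_miss_jump(uint16_t port_id)
{
	struct rte_flow_error error;
	const struct rte_flow_group_attr attr = { .ingress = 1 };
	const struct rte_flow_action_jump jump = { .group = 2 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_group_set_miss_actions(port_id, 1, &attr, actions, &error);
}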
+ */ +__rte_experimental +int +rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table, + const struct rte_flow_item pattern[], uint8_t pattern_template_index, + uint32_t *hash, struct rte_flow_error *error); + #ifdef __cplusplus } #endif diff --git a/lib/ethdev/rte_flow_driver.h b/lib/ethdev/rte_flow_driver.h index f9fb01b8a2a..f35f6595036 100644 --- a/lib/ethdev/rte_flow_driver.h +++ b/lib/ethdev/rte_flow_driver.h @@ -227,6 +227,13 @@ struct rte_flow_ops { (struct rte_eth_dev *dev, struct rte_flow_template_table *template_table, struct rte_flow_error *err); + /** See rte_flow_group_set_miss_actions() */ + int (*group_set_miss_actions) + (struct rte_eth_dev *dev, + uint32_t group_id, + const struct rte_flow_group_attr *attr, + const struct rte_flow_action actions[], + struct rte_flow_error *err); /** See rte_flow_async_create() */ struct rte_flow *(*async_create) (struct rte_eth_dev *dev, @@ -358,6 +365,11 @@ struct rte_flow_ops { const void **update, void **query, enum rte_flow_query_update_mode mode, void *user_data, struct rte_flow_error *error); + /** @see rte_flow_calc_table_hash() */ + int (*flow_calc_table_hash) + (struct rte_eth_dev *dev, const struct rte_flow_template_table *table, + const struct rte_flow_item pattern[], uint8_t pattern_template_index, + uint32_t *hash, struct rte_flow_error *error); }; /** diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map index b965d6aa527..919ba5b8e65 100644 --- a/lib/ethdev/version.map +++ b/lib/ethdev/version.map @@ -312,6 +312,11 @@ EXPERIMENTAL { rte_flow_async_action_list_handle_query_update; rte_flow_async_actions_update; rte_flow_restore_info_dynflag; + + # added in 23.11 + rte_eth_recycle_rx_queue_info_get; + rte_flow_group_set_miss_actions; + rte_flow_calc_table_hash; }; INTERNAL { diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h index f62f42e1404..30bd90085c4 100644 --- a/lib/eventdev/eventdev_pmd.h +++ b/lib/eventdev/eventdev_pmd.h @@ -119,8 +119,8 @@ struct rte_eventdev_data { /**< Array of port configuration structures. */ struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV]; /**< Array of queue configuration structures. */ - uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV * - RTE_EVENT_MAX_QUEUES_PER_DEV]; + uint16_t links_map[RTE_EVENT_MAX_PROFILES_PER_PORT] + [RTE_EVENT_MAX_PORTS_PER_DEV * RTE_EVENT_MAX_QUEUES_PER_DEV]; /**< Memory to store queues to port connections. */ void *dev_private; /**< PMD-specific private data */ @@ -178,8 +178,13 @@ struct rte_eventdev { event_tx_adapter_enqueue_t txa_enqueue; /**< Pointer to PMD eth Tx adapter enqueue function. */ event_crypto_adapter_enqueue_t ca_enqueue; + /**< Pointer to PMD crypto adapter enqueue function. */ + event_dma_adapter_enqueue_t dma_enqueue; + /**< Pointer to PMD DMA adapter enqueue function. */ + event_profile_switch_t profile_switch; + /**< Pointer to PMD Event switch profile function. */ - uint64_t reserved_64s[4]; /**< Reserved for future fields */ + uint64_t reserved_64s[3]; /**< Reserved for future fields */ void *reserved_ptrs[3]; /**< Reserved for future fields */ } __rte_cache_aligned; @@ -437,6 +442,32 @@ typedef int (*eventdev_port_link_t)(struct rte_eventdev *dev, void *port, const uint8_t queues[], const uint8_t priorities[], uint16_t nb_links); +/** + * Link multiple source event queues associated with a link profile to a + * destination event port. 
+ * + * @param dev + * Event device pointer + * @param port + * Event port pointer + * @param queues + * Points to an array of *nb_links* event queues to be linked + * to the event port. + * @param priorities + * Points to an array of *nb_links* service priorities associated with each + * event queue link to event port. + * @param nb_links + * The number of links to establish. + * @param profile_id + * The profile ID to associate the links. + * + * @return + * Returns 0 on success. + */ +typedef int (*eventdev_port_link_profile_t)(struct rte_eventdev *dev, void *port, + const uint8_t queues[], const uint8_t priorities[], + uint16_t nb_links, uint8_t profile_id); + /** * Unlink multiple source event queues from destination event port. * @@ -455,6 +486,28 @@ typedef int (*eventdev_port_link_t)(struct rte_eventdev *dev, void *port, typedef int (*eventdev_port_unlink_t)(struct rte_eventdev *dev, void *port, uint8_t queues[], uint16_t nb_unlinks); +/** + * Unlink multiple source event queues associated with a link profile from + * destination event port. + * + * @param dev + * Event device pointer + * @param port + * Event port pointer + * @param queues + * An array of *nb_unlinks* event queues to be unlinked from the event port. + * @param nb_unlinks + * The number of unlinks to establish + * @param profile_id + * The profile ID of the associated links. + * + * @return + * Returns 0 on success. + */ +typedef int (*eventdev_port_unlink_profile_t)(struct rte_eventdev *dev, void *port, + uint8_t queues[], uint16_t nb_unlinks, + uint8_t profile_id); + /** * Unlinks in progress. Returns number of unlinks that the PMD is currently * performing, but have not yet been completed. @@ -1320,6 +1373,156 @@ typedef int (*eventdev_eth_tx_adapter_queue_stop) #define eventdev_stop_flush_t rte_eventdev_stop_flush_t +/** + * Retrieve the event device's DMA adapter capabilities for the + * specified DMA device + * + * @param dev + * Event device pointer + * + * @param dma_dev_id + * DMA device identifier + * + * @param[out] caps + * A pointer to memory filled with event adapter capabilities. + * It is expected to be pre-allocated & initialized by caller. + * + * @return + * - 0: Success, driver provides event adapter capabilities for the + * dmadev. + * - <0: Error code returned by the driver function. + * + */ +typedef int (*eventdev_dma_adapter_caps_get_t)(const struct rte_eventdev *dev, + const int16_t dma_dev_id, uint32_t *caps); + +/** + * Add DMA vchan queue to event device. This callback is invoked if + * the caps returned from rte_event_dma_adapter_caps_get(, dmadev_id) + * has RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_* set. + * + * @param dev + * Event device pointer + * + * @param dma_dev_id + * DMA device identifier + * + * @param vchan_id + * dmadev vchan queue identifier. + * + * @param event + * Event information required for binding dmadev vchan to event queue. + * This structure will have a valid value for only those HW PMDs supporting + * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND capability. + * + * @return + * - 0: Success, dmadev vchan added successfully. + * - <0: Error code returned by the driver function. + * + */ +typedef int (*eventdev_dma_adapter_vchan_add_t)(const struct rte_eventdev *dev, + const int16_t dma_dev_id, + uint16_t vchan_id, + const struct rte_event *event); + +/** + * Delete DMA vhcan to event device. This callback is invoked if + * the caps returned from rte_event_dma_adapter_caps_get(, dmadev_id) + * has RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_* set. 
+ * + * @param dev + * Event device pointer + * + * @param dma_dev_id + * DMA device identifier + * + * @param vchan_id + * dmadev vchan identifier. + * + * @return + * - 0: Success, dmadev vchan deleted successfully. + * - <0: Error code returned by the driver function. + * + */ +typedef int (*eventdev_dma_adapter_vchan_del_t)(const struct rte_eventdev *dev, + const int16_t dma_dev_id, + uint16_t vchan_id); + +/** + * Start DMA adapter. This callback is invoked if + * the caps returned from rte_event_dma_adapter_caps_get(.., dmadev_id) + * has RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_* set and vchan for dmadev_id + * have been added to the event device. + * + * @param dev + * Event device pointer + * + * @param dma_dev_id + * DMA device identifier + * + * @return + * - 0: Success, DMA adapter started successfully. + * - <0: Error code returned by the driver function. + */ +typedef int (*eventdev_dma_adapter_start_t)(const struct rte_eventdev *dev, + const int16_t dma_dev_id); + +/** + * Stop DMA adapter. This callback is invoked if + * the caps returned from rte_event_dma_adapter_caps_get(.., dmadev_id) + * has RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_* set and vchan for dmadev_id + * have been added to the event device. + * + * @param dev + * Event device pointer + * + * @param dma_dev_id + * DMA device identifier + * + * @return + * - 0: Success, DMA adapter stopped successfully. + * - <0: Error code returned by the driver function. + */ +typedef int (*eventdev_dma_adapter_stop_t)(const struct rte_eventdev *dev, + const int16_t dma_dev_id); + +struct rte_event_dma_adapter_stats; + +/** + * Retrieve DMA adapter statistics. + * + * @param dev + * Event device pointer + * + * @param dma_dev_id + * DMA device identifier + * + * @param[out] stats + * Pointer to stats structure + * + * @return + * Return 0 on success. + */ +typedef int (*eventdev_dma_adapter_stats_get)(const struct rte_eventdev *dev, + const int16_t dma_dev_id, + struct rte_event_dma_adapter_stats *stats); + +/** + * Reset DMA adapter statistics. + * + * @param dev + * Event device pointer + * + * @param dma_dev_id + * DMA device identifier + * + * @return + * Return 0 on success. + */ +typedef int (*eventdev_dma_adapter_stats_reset)(const struct rte_eventdev *dev, + const int16_t dma_dev_id); + + /** Event device operations function pointer table */ struct eventdev_ops { eventdev_info_get_t dev_infos_get; /**< Get device info. */ @@ -1348,8 +1551,12 @@ struct eventdev_ops { eventdev_port_link_t port_link; /**< Link event queues to an event port. */ + eventdev_port_link_profile_t port_link_profile; + /**< Link event queues associated with a profile to an event port. */ eventdev_port_unlink_t port_unlink; /**< Unlink event queues from an event port. */ + eventdev_port_unlink_profile_t port_unlink_profile; + /**< Unlink event queues associated with a profile from an event port. */ eventdev_port_unlinks_in_progress_t port_unlinks_in_progress; /**< Unlinks in progress on an event port. 
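A minimal sketch of the application-level counterpart of the profile link/unlink callbacks above, assuming the public wrappers rte_event_port_profile_links_set() and rte_event_port_profile_switch() added alongside these driver hooks.

#include <rte_eventdev.h>

/* Illustrative: two link profiles on one event port, switched at run time. */
static int
setup_link_profiles(uint8_t dev_id, uint8_t port_id)
{
	const uint8_t q_prof0[] = { 0, 1 };
	const uint8_t q_prof1[] = { 2, 3 };
	int ret;

	/* NULL priorities requests the default priority, as with rte_event_port_link(). */
	ret = rte_event_port_profile_links_set(dev_id, port_id, q_prof0, NULL, 2, 0);
	if (ret != 2)
		return -1;
	ret = rte_event_port_profile_links_set(dev_id, port_id, q_prof1, NULL, 2, 1);
	if (ret != 2)
		return -1;

	/* Fast path: start pulling events from profile 1's queues. */
	return rte_event_port_profile_switch(dev_id, port_id, 1);
}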
*/ eventdev_dequeue_timeout_ticks_t timeout_ticks; @@ -1440,6 +1647,21 @@ struct eventdev_ops { eventdev_eth_tx_adapter_queue_stop eth_tx_adapter_queue_stop; /**< Stop Tx queue assigned to Tx adapter instance */ + eventdev_dma_adapter_caps_get_t dma_adapter_caps_get; + /**< Get DMA adapter capabilities */ + eventdev_dma_adapter_vchan_add_t dma_adapter_vchan_add; + /**< Add vchan queue to DMA adapter */ + eventdev_dma_adapter_vchan_del_t dma_adapter_vchan_del; + /**< Delete vchan queue from DMA adapter */ + eventdev_dma_adapter_start_t dma_adapter_start; + /**< Start DMA adapter */ + eventdev_dma_adapter_stop_t dma_adapter_stop; + /**< Stop DMA adapter */ + eventdev_dma_adapter_stats_get dma_adapter_stats_get; + /**< Get DMA stats */ + eventdev_dma_adapter_stats_reset dma_adapter_stats_reset; + /**< Reset DMA stats */ + eventdev_selftest dev_selftest; /**< Start eventdev Selftest */ diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c index 1d3d9d357e6..017f97ccabf 100644 --- a/lib/eventdev/eventdev_private.c +++ b/lib/eventdev/eventdev_private.c @@ -81,6 +81,21 @@ dummy_event_crypto_adapter_enqueue(__rte_unused void *port, return 0; } +static uint16_t +dummy_event_dma_adapter_enqueue(__rte_unused void *port, __rte_unused struct rte_event ev[], + __rte_unused uint16_t nb_events) +{ + RTE_EDEV_LOG_ERR("event DMA adapter enqueue requested for unconfigured event device"); + return 0; +} + +static int +dummy_event_port_profile_switch(__rte_unused void *port, __rte_unused uint8_t profile_id) +{ + RTE_EDEV_LOG_ERR("change profile requested for unconfigured event device"); + return -EINVAL; +} + void event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op) { @@ -97,6 +112,8 @@ event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op) .txa_enqueue_same_dest = dummy_event_tx_adapter_enqueue_same_dest, .ca_enqueue = dummy_event_crypto_adapter_enqueue, + .dma_enqueue = dummy_event_dma_adapter_enqueue, + .profile_switch = dummy_event_port_profile_switch, .data = dummy_data, }; @@ -117,5 +134,7 @@ event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op, fp_op->txa_enqueue = dev->txa_enqueue; fp_op->txa_enqueue_same_dest = dev->txa_enqueue_same_dest; fp_op->ca_enqueue = dev->ca_enqueue; + fp_op->dma_enqueue = dev->dma_enqueue; + fp_op->profile_switch = dev->profile_switch; fp_op->data = dev->data->ports; } diff --git a/lib/eventdev/eventdev_trace.h b/lib/eventdev/eventdev_trace.h index f008ef00913..9c2b261c06d 100644 --- a/lib/eventdev/eventdev_trace.h +++ b/lib/eventdev/eventdev_trace.h @@ -76,6 +76,17 @@ RTE_TRACE_POINT( rte_trace_point_emit_int(rc); ) +RTE_TRACE_POINT( + rte_eventdev_trace_port_profile_links_set, + RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id, + uint16_t nb_links, uint8_t profile_id, int rc), + rte_trace_point_emit_u8(dev_id); + rte_trace_point_emit_u8(port_id); + rte_trace_point_emit_u16(nb_links); + rte_trace_point_emit_u8(profile_id); + rte_trace_point_emit_int(rc); +) + RTE_TRACE_POINT( rte_eventdev_trace_port_unlink, RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id, @@ -86,6 +97,17 @@ RTE_TRACE_POINT( rte_trace_point_emit_int(rc); ) +RTE_TRACE_POINT( + rte_eventdev_trace_port_profile_unlink, + RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id, + uint16_t nb_unlinks, uint8_t profile_id, int rc), + rte_trace_point_emit_u8(dev_id); + rte_trace_point_emit_u8(port_id); + rte_trace_point_emit_u16(nb_unlinks); + rte_trace_point_emit_u8(profile_id); + rte_trace_point_emit_int(rc); +) + RTE_TRACE_POINT( rte_eventdev_trace_start, RTE_TRACE_POINT_ARGS(uint8_t 
dev_id, int rc), @@ -487,6 +509,16 @@ RTE_TRACE_POINT( rte_trace_point_emit_int(count); ) +RTE_TRACE_POINT( + rte_eventdev_trace_port_profile_links_get, + RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id, uint8_t profile_id, + int count), + rte_trace_point_emit_u8(dev_id); + rte_trace_point_emit_u8(port_id); + rte_trace_point_emit_u8(profile_id); + rte_trace_point_emit_int(count); +) + RTE_TRACE_POINT( rte_eventdev_trace_port_unlinks_in_progress, RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id), diff --git a/lib/eventdev/eventdev_trace_points.c b/lib/eventdev/eventdev_trace_points.c index 76144cfe753..8024e075319 100644 --- a/lib/eventdev/eventdev_trace_points.c +++ b/lib/eventdev/eventdev_trace_points.c @@ -19,9 +19,15 @@ RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_setup, RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_link, lib.eventdev.port.link) +RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_profile_links_set, + lib.eventdev.port.profile.links.set) + RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_unlink, lib.eventdev.port.unlink) +RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_profile_unlink, + lib.eventdev.port.profile.unlink) + RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_start, lib.eventdev.start) @@ -40,6 +46,9 @@ RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_deq_burst, RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_maintain, lib.eventdev.maintain) +RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_profile_switch, + lib.eventdev.port.profile.switch) + /* Eventdev Rx adapter trace points */ RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_eth_rx_adapter_create, lib.eventdev.rx.adapter.create) @@ -206,6 +215,9 @@ RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_default_conf_get, RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_links_get, lib.eventdev.port.links.get) +RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_profile_links_get, + lib.eventdev.port.profile.links.get) + RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_unlinks_in_progress, lib.eventdev.port.unlinks.in.progress) diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build index 6edf98dfa5f..a04bb86f0f2 100644 --- a/lib/eventdev/meson.build +++ b/lib/eventdev/meson.build @@ -7,16 +7,11 @@ if is_windows subdir_done() endif -if is_linux - cflags += '-DLINUX' -else - cflags += '-DBSD' -endif - sources = files( 'eventdev_private.c', 'eventdev_trace_points.c', 'rte_event_crypto_adapter.c', + 'rte_event_dma_adapter.c', 'rte_event_eth_rx_adapter.c', 'rte_event_eth_tx_adapter.c', 'rte_event_ring.c', @@ -25,6 +20,7 @@ sources = files( ) headers = files( 'rte_event_crypto_adapter.h', + 'rte_event_dma_adapter.h', 'rte_event_eth_rx_adapter.h', 'rte_event_eth_tx_adapter.h', 'rte_event_ring.h', @@ -42,5 +38,5 @@ driver_sdk_headers += files( 'event_timer_adapter_pmd.h', ) -deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev'] +deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev', 'dmadev'] deps += ['telemetry'] diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c index 52a28e52d64..1b435c9f0e2 100644 --- a/lib/eventdev/rte_event_crypto_adapter.c +++ b/lib/eventdev/rte_event_crypto_adapter.c @@ -248,9 +248,18 @@ eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp, n = *tailp - *headp; else if (*tailp < *headp) n = bufp->size - *headp; - else { - *nb_ops_flushed = 0; - return 0; /* buffer empty */ + else { /* head == tail case */ + /* when head == tail, + * circ buff is either full(tail pointer roll over) or 
empty + */ + if (bufp->count != 0) { + /* circ buffer is full */ + n = bufp->count; + } else { + /* circ buffer is empty */ + *nb_ops_flushed = 0; + return 0; /* buffer empty */ + } } *nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id, diff --git a/lib/eventdev/rte_event_dma_adapter.c b/lib/eventdev/rte_event_dma_adapter.c new file mode 100644 index 00000000000..af4b5ad3882 --- /dev/null +++ b/lib/eventdev/rte_event_dma_adapter.c @@ -0,0 +1,1434 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2023 Marvell. + */ + +#include +#include + +#include "rte_event_dma_adapter.h" + +#define DMA_BATCH_SIZE 32 +#define DMA_DEFAULT_MAX_NB 128 +#define DMA_ADAPTER_NAME_LEN 32 +#define DMA_ADAPTER_BUFFER_SIZE 1024 + +#define DMA_ADAPTER_OPS_BUFFER_SIZE (DMA_BATCH_SIZE + DMA_BATCH_SIZE) + +#define DMA_ADAPTER_ARRAY "event_dma_adapter_array" + +/* Macros to check for valid adapter */ +#define EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) \ + do { \ + if (!edma_adapter_valid_id(id)) { \ + RTE_EDEV_LOG_ERR("Invalid DMA adapter id = %d\n", id); \ + return retval; \ + } \ + } while (0) + +/* DMA ops circular buffer */ +struct dma_ops_circular_buffer { + /* Index of head element */ + uint16_t head; + + /* Index of tail element */ + uint16_t tail; + + /* Number of elements in buffer */ + uint16_t count; + + /* Size of circular buffer */ + uint16_t size; + + /* Pointer to hold rte_event_dma_adapter_op for processing */ + struct rte_event_dma_adapter_op **op_buffer; +} __rte_cache_aligned; + +/* Vchan information */ +struct dma_vchan_info { + /* Set to indicate vchan queue is enabled */ + bool vq_enabled; + + /* Circular buffer for batching DMA ops to dma_dev */ + struct dma_ops_circular_buffer dma_buf; +} __rte_cache_aligned; + +/* DMA device information */ +struct dma_device_info { + /* Pointer to vchan queue info */ + struct dma_vchan_info *vchanq; + + /* Pointer to vchan queue info. + * This holds ops passed by application till the + * dma completion is done. + */ + struct dma_vchan_info *tqmap; + + /* If num_vchanq > 0, the start callback will + * be invoked if not already invoked + */ + uint16_t num_vchanq; + + /* Number of vchans configured for a DMA device. */ + uint16_t num_dma_dev_vchan; + + /* Next queue pair to be processed */ + uint16_t next_vchan_id; + + /* Set to indicate processing has been started */ + uint8_t dev_started; + + /* Set to indicate dmadev->eventdev packet + * transfer uses a hardware mechanism + */ + uint8_t internal_event_port; +} __rte_cache_aligned; + +struct event_dma_adapter { + /* Event device identifier */ + uint8_t eventdev_id; + + /* Event port identifier */ + uint8_t event_port_id; + + /* Adapter mode */ + enum rte_event_dma_adapter_mode mode; + + /* Memory allocation name */ + char mem_name[DMA_ADAPTER_NAME_LEN]; + + /* Socket identifier cached from eventdev */ + int socket_id; + + /* Lock to serialize config updates with service function */ + rte_spinlock_t lock; + + /* Next dma device to be processed */ + uint16_t next_dmadev_id; + + /* DMA device structure array */ + struct dma_device_info *dma_devs; + + /* Circular buffer for processing DMA ops to eventdev */ + struct dma_ops_circular_buffer ebuf; + + /* Configuration callback for rte_service configuration */ + rte_event_dma_adapter_conf_cb conf_cb; + + /* Configuration callback argument */ + void *conf_arg; + + /* Set if default_cb is being used */ + int default_cb_arg; + + /* No. 
of vchan queue configured */ + uint16_t nb_vchanq; + + /* Per adapter EAL service ID */ + uint32_t service_id; + + /* Service initialization state */ + uint8_t service_initialized; + + /* Max DMA ops processed in any service function invocation */ + uint32_t max_nb; + + /* Store event port's implicit release capability */ + uint8_t implicit_release_disabled; + + /* Flag to indicate backpressure at dma_dev + * Stop further dequeuing events from eventdev + */ + bool stop_enq_to_dma_dev; + + /* Loop counter to flush dma ops */ + uint16_t transmit_loop_count; + + /* Per instance stats structure */ + struct rte_event_dma_adapter_stats dma_stats; +} __rte_cache_aligned; + +static struct event_dma_adapter **event_dma_adapter; + +static inline int +edma_adapter_valid_id(uint8_t id) +{ + return id < RTE_EVENT_DMA_ADAPTER_MAX_INSTANCE; +} + +static inline struct event_dma_adapter * +edma_id_to_adapter(uint8_t id) +{ + return event_dma_adapter ? event_dma_adapter[id] : NULL; +} + +static int +edma_array_init(void) +{ + const struct rte_memzone *mz; + uint32_t sz; + + mz = rte_memzone_lookup(DMA_ADAPTER_ARRAY); + if (mz == NULL) { + sz = sizeof(struct event_dma_adapter *) * RTE_EVENT_DMA_ADAPTER_MAX_INSTANCE; + sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE); + + mz = rte_memzone_reserve_aligned(DMA_ADAPTER_ARRAY, sz, rte_socket_id(), 0, + RTE_CACHE_LINE_SIZE); + if (mz == NULL) { + RTE_EDEV_LOG_ERR("Failed to reserve memzone : %s, err = %d", + DMA_ADAPTER_ARRAY, rte_errno); + return -rte_errno; + } + } + + event_dma_adapter = mz->addr; + + return 0; +} + +static inline bool +edma_circular_buffer_batch_ready(struct dma_ops_circular_buffer *bufp) +{ + return bufp->count >= DMA_BATCH_SIZE; +} + +static inline bool +edma_circular_buffer_space_for_batch(struct dma_ops_circular_buffer *bufp) +{ + return (bufp->size - bufp->count) >= DMA_BATCH_SIZE; +} + +static inline int +edma_circular_buffer_init(const char *name, struct dma_ops_circular_buffer *buf, uint16_t sz) +{ + buf->op_buffer = rte_zmalloc(name, sizeof(struct rte_event_dma_adapter_op *) * sz, 0); + if (buf->op_buffer == NULL) + return -ENOMEM; + + buf->size = sz; + + return 0; +} + +static inline void +edma_circular_buffer_free(struct dma_ops_circular_buffer *buf) +{ + rte_free(buf->op_buffer); +} + +static inline int +edma_circular_buffer_add(struct dma_ops_circular_buffer *bufp, struct rte_event_dma_adapter_op *op) +{ + uint16_t *tail = &bufp->tail; + + bufp->op_buffer[*tail] = op; + + /* circular buffer, go round */ + *tail = (*tail + 1) % bufp->size; + bufp->count++; + + return 0; +} + +static inline int +edma_circular_buffer_flush_to_dma_dev(struct event_dma_adapter *adapter, + struct dma_ops_circular_buffer *bufp, uint8_t dma_dev_id, + uint16_t vchan, uint16_t *nb_ops_flushed) +{ + struct rte_event_dma_adapter_op *op; + struct dma_vchan_info *tq; + uint16_t *head = &bufp->head; + uint16_t *tail = &bufp->tail; + uint16_t n; + uint16_t i; + int ret; + + if (*tail > *head) + n = *tail - *head; + else if (*tail < *head) + n = bufp->size - *head; + else { + *nb_ops_flushed = 0; + return 0; /* buffer empty */ + } + + tq = &adapter->dma_devs[dma_dev_id].tqmap[vchan]; + + for (i = 0; i < n; i++) { + op = bufp->op_buffer[*head]; + if (op->nb_src == 1 && op->nb_dst == 1) + ret = rte_dma_copy(dma_dev_id, vchan, op->src_seg->addr, op->dst_seg->addr, + op->src_seg->length, op->flags); + else + ret = rte_dma_copy_sg(dma_dev_id, vchan, op->src_seg, op->dst_seg, + op->nb_src, op->nb_dst, op->flags); + if (ret < 0) + break; + + /* Enqueue in transaction queue. 
*/ + edma_circular_buffer_add(&tq->dma_buf, op); + + *head = (*head + 1) % bufp->size; + } + + *nb_ops_flushed = i; + bufp->count -= *nb_ops_flushed; + if (!bufp->count) { + *head = 0; + *tail = 0; + } + + return *nb_ops_flushed == n ? 0 : -1; +} + +static int +edma_default_config_cb(uint8_t id, uint8_t evdev_id, struct rte_event_dma_adapter_conf *conf, + void *arg) +{ + struct rte_event_port_conf *port_conf; + struct rte_event_dev_config dev_conf; + struct event_dma_adapter *adapter; + struct rte_eventdev *dev; + uint8_t port_id; + int started; + int ret; + + adapter = edma_id_to_adapter(id); + if (adapter == NULL) + return -EINVAL; + + dev = &rte_eventdevs[adapter->eventdev_id]; + dev_conf = dev->data->dev_conf; + + started = dev->data->dev_started; + if (started) + rte_event_dev_stop(evdev_id); + + port_id = dev_conf.nb_event_ports; + dev_conf.nb_event_ports += 1; + + port_conf = arg; + if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK) + dev_conf.nb_single_link_event_port_queues += 1; + + ret = rte_event_dev_configure(evdev_id, &dev_conf); + if (ret) { + RTE_EDEV_LOG_ERR("Failed to configure event dev %u\n", evdev_id); + if (started) { + if (rte_event_dev_start(evdev_id)) + return -EIO; + } + return ret; + } + + ret = rte_event_port_setup(evdev_id, port_id, port_conf); + if (ret) { + RTE_EDEV_LOG_ERR("Failed to setup event port %u\n", port_id); + return ret; + } + + conf->event_port_id = port_id; + conf->max_nb = DMA_DEFAULT_MAX_NB; + if (started) + ret = rte_event_dev_start(evdev_id); + + adapter->default_cb_arg = 1; + adapter->event_port_id = conf->event_port_id; + + return ret; +} + +int +rte_event_dma_adapter_create_ext(uint8_t id, uint8_t evdev_id, + rte_event_dma_adapter_conf_cb conf_cb, + enum rte_event_dma_adapter_mode mode, void *conf_arg) +{ + struct rte_event_dev_info dev_info; + struct event_dma_adapter *adapter; + char name[DMA_ADAPTER_NAME_LEN]; + struct rte_dma_info info; + uint16_t num_dma_dev; + int socket_id; + uint8_t i; + int ret; + + EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(evdev_id, -EINVAL); + + if (conf_cb == NULL) + return -EINVAL; + + if (event_dma_adapter == NULL) { + ret = edma_array_init(); + if (ret) + return ret; + } + + adapter = edma_id_to_adapter(id); + if (adapter != NULL) { + RTE_EDEV_LOG_ERR("ML adapter ID %d already exists!", id); + return -EEXIST; + } + + socket_id = rte_event_dev_socket_id(evdev_id); + snprintf(name, DMA_ADAPTER_NAME_LEN, "rte_event_dma_adapter_%d", id); + adapter = rte_zmalloc_socket(name, sizeof(struct event_dma_adapter), RTE_CACHE_LINE_SIZE, + socket_id); + if (adapter == NULL) { + RTE_EDEV_LOG_ERR("Failed to get mem for event ML adapter!"); + return -ENOMEM; + } + + if (edma_circular_buffer_init("edma_circular_buffer", &adapter->ebuf, + DMA_ADAPTER_BUFFER_SIZE)) { + RTE_EDEV_LOG_ERR("Failed to get memory for event adapter circular buffer"); + rte_free(adapter); + return -ENOMEM; + } + + ret = rte_event_dev_info_get(evdev_id, &dev_info); + if (ret < 0) { + RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s", evdev_id, + dev_info.driver_name); + edma_circular_buffer_free(&adapter->ebuf); + rte_free(adapter); + return ret; + } + + num_dma_dev = rte_dma_count_avail(); + + adapter->eventdev_id = evdev_id; + adapter->mode = mode; + rte_strscpy(adapter->mem_name, name, DMA_ADAPTER_NAME_LEN); + adapter->socket_id = socket_id; + adapter->conf_cb = conf_cb; + adapter->conf_arg = conf_arg; + adapter->dma_devs = rte_zmalloc_socket(adapter->mem_name, + num_dma_dev * 
sizeof(struct dma_device_info), 0, + socket_id); + if (adapter->dma_devs == NULL) { + RTE_EDEV_LOG_ERR("Failed to get memory for DMA devices\n"); + edma_circular_buffer_free(&adapter->ebuf); + rte_free(adapter); + return -ENOMEM; + } + + rte_spinlock_init(&adapter->lock); + for (i = 0; i < num_dma_dev; i++) { + ret = rte_dma_info_get(i, &info); + if (ret) { + RTE_EDEV_LOG_ERR("Failed to get dma device info\n"); + edma_circular_buffer_free(&adapter->ebuf); + rte_free(adapter); + return ret; + } + + adapter->dma_devs[i].num_dma_dev_vchan = info.nb_vchans; + } + + event_dma_adapter[id] = adapter; + + return 0; +} + +int +rte_event_dma_adapter_create(uint8_t id, uint8_t evdev_id, struct rte_event_port_conf *port_config, + enum rte_event_dma_adapter_mode mode) +{ + struct rte_event_port_conf *pc; + int ret; + + if (port_config == NULL) + return -EINVAL; + + EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + pc = rte_malloc(NULL, sizeof(struct rte_event_port_conf), 0); + if (pc == NULL) + return -ENOMEM; + + rte_memcpy(pc, port_config, sizeof(struct rte_event_port_conf)); + ret = rte_event_dma_adapter_create_ext(id, evdev_id, edma_default_config_cb, mode, pc); + if (ret != 0) + rte_free(pc); + + return ret; +} + +int +rte_event_dma_adapter_free(uint8_t id) +{ + struct event_dma_adapter *adapter; + + EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + adapter = edma_id_to_adapter(id); + if (adapter == NULL) + return -EINVAL; + + rte_free(adapter->conf_arg); + rte_free(adapter->dma_devs); + edma_circular_buffer_free(&adapter->ebuf); + rte_free(adapter); + event_dma_adapter[id] = NULL; + + return 0; +} + +int +rte_event_dma_adapter_event_port_get(uint8_t id, uint8_t *event_port_id) +{ + struct event_dma_adapter *adapter; + + EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + adapter = edma_id_to_adapter(id); + if (adapter == NULL || event_port_id == NULL) + return -EINVAL; + + *event_port_id = adapter->event_port_id; + + return 0; +} + +static inline unsigned int +edma_enq_to_dma_dev(struct event_dma_adapter *adapter, struct rte_event *ev, unsigned int cnt) +{ + struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats; + struct dma_vchan_info *vchan_qinfo = NULL; + struct rte_event_dma_adapter_op *dma_op; + uint16_t vchan, nb_enqueued = 0; + int16_t dma_dev_id; + unsigned int i, n; + int ret; + + ret = 0; + n = 0; + stats->event_deq_count += cnt; + + for (i = 0; i < cnt; i++) { + dma_op = ev[i].event_ptr; + if (dma_op == NULL) + continue; + + /* Expected to have response info appended to dma_op. 
*/ + + dma_dev_id = dma_op->dma_dev_id; + vchan = dma_op->vchan; + vchan_qinfo = &adapter->dma_devs[dma_dev_id].vchanq[vchan]; + if (!vchan_qinfo->vq_enabled) { + if (dma_op != NULL && dma_op->op_mp != NULL) + rte_mempool_put(dma_op->op_mp, dma_op); + continue; + } + edma_circular_buffer_add(&vchan_qinfo->dma_buf, dma_op); + + if (edma_circular_buffer_batch_ready(&vchan_qinfo->dma_buf)) { + ret = edma_circular_buffer_flush_to_dma_dev(adapter, &vchan_qinfo->dma_buf, + dma_dev_id, vchan, + &nb_enqueued); + stats->dma_enq_count += nb_enqueued; + n += nb_enqueued; + + /** + * If some dma ops failed to flush to dma_dev and + * space for another batch is not available, stop + * dequeue from eventdev momentarily + */ + if (unlikely(ret < 0 && + !edma_circular_buffer_space_for_batch(&vchan_qinfo->dma_buf))) + adapter->stop_enq_to_dma_dev = true; + } + } + + return n; +} + +static unsigned int +edma_adapter_dev_flush(struct event_dma_adapter *adapter, int16_t dma_dev_id, + uint16_t *nb_ops_flushed) +{ + struct dma_vchan_info *vchan_info; + struct dma_device_info *dev_info; + uint16_t nb = 0, nb_enqueued = 0; + uint16_t vchan, nb_vchans; + + dev_info = &adapter->dma_devs[dma_dev_id]; + nb_vchans = dev_info->num_vchanq; + + for (vchan = 0; vchan < nb_vchans; vchan++) { + + vchan_info = &dev_info->vchanq[vchan]; + if (unlikely(vchan_info == NULL || !vchan_info->vq_enabled)) + continue; + + edma_circular_buffer_flush_to_dma_dev(adapter, &vchan_info->dma_buf, dma_dev_id, + vchan, &nb_enqueued); + *nb_ops_flushed += vchan_info->dma_buf.count; + nb += nb_enqueued; + } + + return nb; +} + +static unsigned int +edma_adapter_enq_flush(struct event_dma_adapter *adapter) +{ + struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats; + int16_t dma_dev_id; + uint16_t nb_enqueued = 0; + uint16_t nb_ops_flushed = 0; + uint16_t num_dma_dev = rte_dma_count_avail(); + + for (dma_dev_id = 0; dma_dev_id < num_dma_dev; dma_dev_id++) + nb_enqueued += edma_adapter_dev_flush(adapter, dma_dev_id, &nb_ops_flushed); + /** + * Enable dequeue from eventdev if all ops from circular + * buffer flushed to dma_dev + */ + if (!nb_ops_flushed) + adapter->stop_enq_to_dma_dev = false; + + stats->dma_enq_count += nb_enqueued; + + return nb_enqueued; +} + +/* Flush an instance's enqueue buffers every DMA_ENQ_FLUSH_THRESHOLD + * iterations of edma_adapter_enq_run() + */ +#define DMA_ENQ_FLUSH_THRESHOLD 1024 + +static int +edma_adapter_enq_run(struct event_dma_adapter *adapter, unsigned int max_enq) +{ + struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats; + uint8_t event_port_id = adapter->event_port_id; + uint8_t event_dev_id = adapter->eventdev_id; + struct rte_event ev[DMA_BATCH_SIZE]; + unsigned int nb_enq, nb_enqueued; + uint16_t n; + + if (adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW) + return 0; + + nb_enqueued = 0; + for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) { + + if (unlikely(adapter->stop_enq_to_dma_dev)) { + nb_enqueued += edma_adapter_enq_flush(adapter); + + if (unlikely(adapter->stop_enq_to_dma_dev)) + break; + } + + stats->event_poll_count++; + n = rte_event_dequeue_burst(event_dev_id, event_port_id, ev, DMA_BATCH_SIZE, 0); + + if (!n) + break; + + nb_enqueued += edma_enq_to_dma_dev(adapter, ev, n); + } + + if ((++adapter->transmit_loop_count & (DMA_ENQ_FLUSH_THRESHOLD - 1)) == 0) + nb_enqueued += edma_adapter_enq_flush(adapter); + + return nb_enqueued; +} + +#define DMA_ADAPTER_MAX_EV_ENQ_RETRIES 100 + +static inline uint16_t +edma_ops_enqueue_burst(struct event_dma_adapter *adapter, struct 
rte_event_dma_adapter_op **ops, + uint16_t num) +{ + struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats; + uint8_t event_port_id = adapter->event_port_id; + uint8_t event_dev_id = adapter->eventdev_id; + struct rte_event events[DMA_BATCH_SIZE]; + struct rte_event *response_info; + uint16_t nb_enqueued, nb_ev; + uint8_t retry; + uint8_t i; + + nb_ev = 0; + retry = 0; + nb_enqueued = 0; + num = RTE_MIN(num, DMA_BATCH_SIZE); + for (i = 0; i < num; i++) { + struct rte_event *ev = &events[nb_ev++]; + + /* Expected to have response info appended to dma_op. */ + response_info = (struct rte_event *)((uint8_t *)ops[i] + + sizeof(struct rte_event_dma_adapter_op)); + if (unlikely(response_info == NULL)) { + if (ops[i] != NULL && ops[i]->op_mp != NULL) + rte_mempool_put(ops[i]->op_mp, ops[i]); + continue; + } + + rte_memcpy(ev, response_info, sizeof(struct rte_event)); + ev->event_ptr = ops[i]; + ev->event_type = RTE_EVENT_TYPE_DMADEV; + if (adapter->implicit_release_disabled) + ev->op = RTE_EVENT_OP_FORWARD; + else + ev->op = RTE_EVENT_OP_NEW; + } + + do { + nb_enqueued += rte_event_enqueue_burst(event_dev_id, event_port_id, + &events[nb_enqueued], nb_ev - nb_enqueued); + + } while (retry++ < DMA_ADAPTER_MAX_EV_ENQ_RETRIES && nb_enqueued < nb_ev); + + stats->event_enq_fail_count += nb_ev - nb_enqueued; + stats->event_enq_count += nb_enqueued; + stats->event_enq_retry_count += retry - 1; + + return nb_enqueued; +} + +static int +edma_circular_buffer_flush_to_evdev(struct event_dma_adapter *adapter, + struct dma_ops_circular_buffer *bufp, + uint16_t *enqueue_count) +{ + struct rte_event_dma_adapter_op **ops = bufp->op_buffer; + uint16_t n = 0, nb_ops_flushed; + uint16_t *head = &bufp->head; + uint16_t *tail = &bufp->tail; + + if (*tail > *head) + n = *tail - *head; + else if (*tail < *head) + n = bufp->size - *head; + else { + if (enqueue_count) + *enqueue_count = 0; + return 0; /* buffer empty */ + } + + if (enqueue_count && n > *enqueue_count) + n = *enqueue_count; + + nb_ops_flushed = edma_ops_enqueue_burst(adapter, &ops[*head], n); + if (enqueue_count) + *enqueue_count = nb_ops_flushed; + + bufp->count -= nb_ops_flushed; + if (!bufp->count) { + *head = 0; + *tail = 0; + return 0; /* buffer empty */ + } + + *head = (*head + nb_ops_flushed) % bufp->size; + return 1; +} + +static void +edma_ops_buffer_flush(struct event_dma_adapter *adapter) +{ + if (likely(adapter->ebuf.count == 0)) + return; + + while (edma_circular_buffer_flush_to_evdev(adapter, &adapter->ebuf, NULL)) + ; +} + +static inline unsigned int +edma_adapter_deq_run(struct event_dma_adapter *adapter, unsigned int max_deq) +{ + struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats; + struct dma_vchan_info *vchan_info; + struct dma_ops_circular_buffer *tq_buf; + struct rte_event_dma_adapter_op *ops; + uint16_t n, nb_deq, nb_enqueued, i; + struct dma_device_info *dev_info; + uint16_t vchan, num_vchan; + uint16_t num_dma_dev; + int16_t dma_dev_id; + uint16_t index; + bool done; + bool err; + + nb_deq = 0; + edma_ops_buffer_flush(adapter); + + num_dma_dev = rte_dma_count_avail(); + do { + done = true; + + for (dma_dev_id = adapter->next_dmadev_id; dma_dev_id < num_dma_dev; dma_dev_id++) { + uint16_t queues = 0; + dev_info = &adapter->dma_devs[dma_dev_id]; + num_vchan = dev_info->num_vchanq; + + for (vchan = dev_info->next_vchan_id; queues < num_vchan; + vchan = (vchan + 1) % num_vchan, queues++) { + + vchan_info = &dev_info->vchanq[vchan]; + if (unlikely(vchan_info == NULL || !vchan_info->vq_enabled)) + continue; + + n = 
rte_dma_completed(dma_dev_id, vchan, DMA_BATCH_SIZE, + &index, &err); + if (!n) + continue; + + done = false; + stats->dma_deq_count += n; + + tq_buf = &dev_info->tqmap[vchan].dma_buf; + + nb_enqueued = n; + if (unlikely(!adapter->ebuf.count)) + edma_circular_buffer_flush_to_evdev(adapter, tq_buf, + &nb_enqueued); + + if (likely(nb_enqueued == n)) + goto check; + + /* Failed to enqueue events case */ + for (i = nb_enqueued; i < n; i++) { + ops = tq_buf->op_buffer[tq_buf->head]; + edma_circular_buffer_add(&adapter->ebuf, ops); + tq_buf->head = (tq_buf->head + 1) % tq_buf->size; + } + +check: + nb_deq += n; + if (nb_deq >= max_deq) { + if ((vchan + 1) == num_vchan) + adapter->next_dmadev_id = + (dma_dev_id + 1) % num_dma_dev; + + dev_info->next_vchan_id = (vchan + 1) % num_vchan; + + return nb_deq; + } + } + } + adapter->next_dmadev_id = 0; + + } while (done == false); + + return nb_deq; +} + +static int +edma_adapter_run(struct event_dma_adapter *adapter, unsigned int max_ops) +{ + unsigned int ops_left = max_ops; + + while (ops_left > 0) { + unsigned int e_cnt, d_cnt; + + e_cnt = edma_adapter_deq_run(adapter, ops_left); + ops_left -= RTE_MIN(ops_left, e_cnt); + + d_cnt = edma_adapter_enq_run(adapter, ops_left); + ops_left -= RTE_MIN(ops_left, d_cnt); + + if (e_cnt == 0 && d_cnt == 0) + break; + } + + if (ops_left == max_ops) { + rte_event_maintain(adapter->eventdev_id, adapter->event_port_id, 0); + return -EAGAIN; + } else + return 0; +} + +static int +edma_service_func(void *args) +{ + struct event_dma_adapter *adapter = args; + int ret; + + if (rte_spinlock_trylock(&adapter->lock) == 0) + return 0; + ret = edma_adapter_run(adapter, adapter->max_nb); + rte_spinlock_unlock(&adapter->lock); + + return ret; +} + +static int +edma_init_service(struct event_dma_adapter *adapter, uint8_t id) +{ + struct rte_event_dma_adapter_conf adapter_conf; + struct rte_service_spec service; + uint32_t impl_rel; + int ret; + + if (adapter->service_initialized) + return 0; + + memset(&service, 0, sizeof(service)); + snprintf(service.name, DMA_ADAPTER_NAME_LEN, "rte_event_dma_adapter_%d", id); + service.socket_id = adapter->socket_id; + service.callback = edma_service_func; + service.callback_userdata = adapter; + + /* Service function handles locking for queue add/del updates */ + service.capabilities = RTE_SERVICE_CAP_MT_SAFE; + ret = rte_service_component_register(&service, &adapter->service_id); + if (ret) { + RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32, service.name, ret); + return ret; + } + + ret = adapter->conf_cb(id, adapter->eventdev_id, &adapter_conf, adapter->conf_arg); + if (ret) { + RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32, ret); + return ret; + } + + adapter->max_nb = adapter_conf.max_nb; + adapter->event_port_id = adapter_conf.event_port_id; + + if (rte_event_port_attr_get(adapter->eventdev_id, adapter->event_port_id, + RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE, &impl_rel)) { + RTE_EDEV_LOG_ERR("Failed to get port info for eventdev %" PRId32, + adapter->eventdev_id); + edma_circular_buffer_free(&adapter->ebuf); + rte_free(adapter); + return -EINVAL; + } + + adapter->implicit_release_disabled = (uint8_t)impl_rel; + adapter->service_initialized = 1; + + return ret; +} + +static void +edma_update_vchanq_info(struct event_dma_adapter *adapter, struct dma_device_info *dev_info, + uint16_t vchan, uint8_t add) +{ + struct dma_vchan_info *vchan_info; + struct dma_vchan_info *tqmap_info; + int enabled; + uint16_t i; + + if (dev_info->vchanq == NULL) + 
return; + + if (vchan == RTE_DMA_ALL_VCHAN) { + for (i = 0; i < dev_info->num_dma_dev_vchan; i++) + edma_update_vchanq_info(adapter, dev_info, i, add); + } else { + tqmap_info = &dev_info->tqmap[vchan]; + vchan_info = &dev_info->vchanq[vchan]; + enabled = vchan_info->vq_enabled; + if (add) { + adapter->nb_vchanq += !enabled; + dev_info->num_vchanq += !enabled; + } else { + adapter->nb_vchanq -= enabled; + dev_info->num_vchanq -= enabled; + } + vchan_info->vq_enabled = !!add; + tqmap_info->vq_enabled = !!add; + } +} + +static int +edma_add_vchan(struct event_dma_adapter *adapter, int16_t dma_dev_id, uint16_t vchan) +{ + struct dma_device_info *dev_info = &adapter->dma_devs[dma_dev_id]; + struct dma_vchan_info *vchanq; + struct dma_vchan_info *tqmap; + uint16_t nb_vchans; + uint32_t i; + + if (dev_info->vchanq == NULL) { + nb_vchans = dev_info->num_dma_dev_vchan; + + dev_info->vchanq = rte_zmalloc_socket(adapter->mem_name, + nb_vchans * sizeof(struct dma_vchan_info), + 0, adapter->socket_id); + if (dev_info->vchanq == NULL) + return -ENOMEM; + + dev_info->tqmap = rte_zmalloc_socket(adapter->mem_name, + nb_vchans * sizeof(struct dma_vchan_info), + 0, adapter->socket_id); + if (dev_info->tqmap == NULL) + return -ENOMEM; + + for (i = 0; i < nb_vchans; i++) { + vchanq = &dev_info->vchanq[i]; + + if (edma_circular_buffer_init("dma_dev_circular_buffer", &vchanq->dma_buf, + DMA_ADAPTER_OPS_BUFFER_SIZE)) { + RTE_EDEV_LOG_ERR("Failed to get memory for dma_dev buffer"); + rte_free(vchanq); + return -ENOMEM; + } + + tqmap = &dev_info->tqmap[i]; + if (edma_circular_buffer_init("dma_dev_circular_trans_buf", &tqmap->dma_buf, + DMA_ADAPTER_OPS_BUFFER_SIZE)) { + RTE_EDEV_LOG_ERR( + "Failed to get memory for dma_dev transaction buffer"); + rte_free(tqmap); + return -ENOMEM; + } + } + } + + if (vchan == RTE_DMA_ALL_VCHAN) { + for (i = 0; i < dev_info->num_dma_dev_vchan; i++) + edma_update_vchanq_info(adapter, dev_info, i, 1); + } else + edma_update_vchanq_info(adapter, dev_info, vchan, 1); + + return 0; +} + +int +rte_event_dma_adapter_vchan_add(uint8_t id, int16_t dma_dev_id, uint16_t vchan, + const struct rte_event *event) +{ + struct event_dma_adapter *adapter; + struct dma_device_info *dev_info; + struct rte_eventdev *dev; + uint32_t cap; + int ret; + + EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + if (!rte_dma_is_valid(dma_dev_id)) { + RTE_EDEV_LOG_ERR("Invalid dma_dev_id = %" PRIu8, dma_dev_id); + return -EINVAL; + } + + adapter = edma_id_to_adapter(id); + if (adapter == NULL) + return -EINVAL; + + dev = &rte_eventdevs[adapter->eventdev_id]; + ret = rte_event_dma_adapter_caps_get(adapter->eventdev_id, dma_dev_id, &cap); + if (ret) { + RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %u dma_dev %u", id, dma_dev_id); + return ret; + } + + if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) && (event == NULL)) { + RTE_EDEV_LOG_ERR("Event can not be NULL for dma_dev_id = %u", dma_dev_id); + return -EINVAL; + } + + dev_info = &adapter->dma_devs[dma_dev_id]; + if (vchan != RTE_DMA_ALL_VCHAN && vchan >= dev_info->num_dma_dev_vchan) { + RTE_EDEV_LOG_ERR("Invalid vhcan %u", vchan); + return -EINVAL; + } + + /* In case HW cap is RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD, no + * need of service core as HW supports event forward capability. 
+ */ + if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) || + (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND && + adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW) || + (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW && + adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW)) { + if (*dev->dev_ops->dma_adapter_vchan_add == NULL) + return -ENOTSUP; + if (dev_info->vchanq == NULL) { + dev_info->vchanq = rte_zmalloc_socket(adapter->mem_name, + dev_info->num_dma_dev_vchan * + sizeof(struct dma_vchan_info), + 0, adapter->socket_id); + if (dev_info->vchanq == NULL) { + printf("Queue pair add not supported\n"); + return -ENOMEM; + } + } + + if (dev_info->tqmap == NULL) { + dev_info->tqmap = rte_zmalloc_socket(adapter->mem_name, + dev_info->num_dma_dev_vchan * + sizeof(struct dma_vchan_info), + 0, adapter->socket_id); + if (dev_info->tqmap == NULL) { + printf("tq pair add not supported\n"); + return -ENOMEM; + } + } + + ret = (*dev->dev_ops->dma_adapter_vchan_add)(dev, dma_dev_id, vchan, event); + if (ret) + return ret; + + else + edma_update_vchanq_info(adapter, &adapter->dma_devs[dma_dev_id], vchan, 1); + } + + /* In case HW cap is RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW, or SW adapter, initiate + * services so the application can choose which ever way it wants to use the adapter. + * + * Case 1: RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW. Application may wants to use one + * of below two modes + * + * a. OP_FORWARD mode -> HW Dequeue + SW enqueue + * b. OP_NEW mode -> HW Dequeue + * + * Case 2: No HW caps, use SW adapter + * + * a. OP_FORWARD mode -> SW enqueue & dequeue + * b. OP_NEW mode -> SW Dequeue + */ + if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW && + !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) && + adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) || + (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) && + !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) && + !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND))) { + rte_spinlock_lock(&adapter->lock); + ret = edma_init_service(adapter, id); + if (ret == 0) + ret = edma_add_vchan(adapter, dma_dev_id, vchan); + rte_spinlock_unlock(&adapter->lock); + + if (ret) + return ret; + + rte_service_component_runstate_set(adapter->service_id, 1); + } + + return 0; +} + +int +rte_event_dma_adapter_vchan_del(uint8_t id, int16_t dma_dev_id, uint16_t vchan) +{ + struct event_dma_adapter *adapter; + struct dma_device_info *dev_info; + struct rte_eventdev *dev; + uint32_t cap; + int ret; + + EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + if (!rte_dma_is_valid(dma_dev_id)) { + RTE_EDEV_LOG_ERR("Invalid dma_dev_id = %" PRIu8, dma_dev_id); + return -EINVAL; + } + + adapter = edma_id_to_adapter(id); + if (adapter == NULL) + return -EINVAL; + + dev = &rte_eventdevs[adapter->eventdev_id]; + ret = rte_event_dma_adapter_caps_get(adapter->eventdev_id, dma_dev_id, &cap); + if (ret) + return ret; + + dev_info = &adapter->dma_devs[dma_dev_id]; + + if (vchan != RTE_DMA_ALL_VCHAN && vchan >= dev_info->num_dma_dev_vchan) { + RTE_EDEV_LOG_ERR("Invalid vhcan %" PRIu16, vchan); + return -EINVAL; + } + + if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) || + (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW && + adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW)) { + if (*dev->dev_ops->dma_adapter_vchan_del == NULL) + return -ENOTSUP; + ret = (*dev->dev_ops->dma_adapter_vchan_del)(dev, dma_dev_id, vchan); + if (ret == 0) { + edma_update_vchanq_info(adapter, dev_info, vchan, 
0); + if (dev_info->num_vchanq == 0) { + rte_free(dev_info->vchanq); + dev_info->vchanq = NULL; + } + } + } else { + if (adapter->nb_vchanq == 0) + return 0; + + rte_spinlock_lock(&adapter->lock); + edma_update_vchanq_info(adapter, dev_info, vchan, 0); + + if (dev_info->num_vchanq == 0) { + rte_free(dev_info->vchanq); + rte_free(dev_info->tqmap); + dev_info->vchanq = NULL; + dev_info->tqmap = NULL; + } + + rte_spinlock_unlock(&adapter->lock); + rte_service_component_runstate_set(adapter->service_id, adapter->nb_vchanq); + } + + return ret; +} + +int +rte_event_dma_adapter_service_id_get(uint8_t id, uint32_t *service_id) +{ + struct event_dma_adapter *adapter; + + EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + adapter = edma_id_to_adapter(id); + if (adapter == NULL || service_id == NULL) + return -EINVAL; + + if (adapter->service_initialized) + *service_id = adapter->service_id; + + return adapter->service_initialized ? 0 : -ESRCH; +} + +static int +edma_adapter_ctrl(uint8_t id, int start) +{ + struct event_dma_adapter *adapter; + struct dma_device_info *dev_info; + struct rte_eventdev *dev; + uint16_t num_dma_dev; + int stop = !start; + int use_service; + uint32_t i; + + use_service = 0; + EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + adapter = edma_id_to_adapter(id); + if (adapter == NULL) + return -EINVAL; + + num_dma_dev = rte_dma_count_avail(); + dev = &rte_eventdevs[adapter->eventdev_id]; + + for (i = 0; i < num_dma_dev; i++) { + dev_info = &adapter->dma_devs[i]; + /* start check for num queue pairs */ + if (start && !dev_info->num_vchanq) + continue; + /* stop check if dev has been started */ + if (stop && !dev_info->dev_started) + continue; + use_service |= !dev_info->internal_event_port; + dev_info->dev_started = start; + if (dev_info->internal_event_port == 0) + continue; + start ? 
(*dev->dev_ops->dma_adapter_start)(dev, i) : + (*dev->dev_ops->dma_adapter_stop)(dev, i); + } + + if (use_service) + rte_service_runstate_set(adapter->service_id, start); + + return 0; +} + +int +rte_event_dma_adapter_start(uint8_t id) +{ + struct event_dma_adapter *adapter; + + EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + adapter = edma_id_to_adapter(id); + if (adapter == NULL) + return -EINVAL; + + return edma_adapter_ctrl(id, 1); +} + +int +rte_event_dma_adapter_stop(uint8_t id) +{ + return edma_adapter_ctrl(id, 0); +} + +#define DEFAULT_MAX_NB 128 + +int +rte_event_dma_adapter_runtime_params_init(struct rte_event_dma_adapter_runtime_params *params) +{ + if (params == NULL) + return -EINVAL; + + memset(params, 0, sizeof(*params)); + params->max_nb = DEFAULT_MAX_NB; + + return 0; +} + +static int +dma_adapter_cap_check(struct event_dma_adapter *adapter) +{ + uint32_t caps; + int ret; + + if (!adapter->nb_vchanq) + return -EINVAL; + + ret = rte_event_dma_adapter_caps_get(adapter->eventdev_id, adapter->next_dmadev_id, &caps); + if (ret) { + RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8 " cdev %" PRIu8, + adapter->eventdev_id, adapter->next_dmadev_id); + return ret; + } + + if ((caps & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) || + (caps & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) + return -ENOTSUP; + + return 0; +} + +int +rte_event_dma_adapter_runtime_params_set(uint8_t id, + struct rte_event_dma_adapter_runtime_params *params) +{ + struct event_dma_adapter *adapter; + int ret; + + EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + if (params == NULL) { + RTE_EDEV_LOG_ERR("params pointer is NULL\n"); + return -EINVAL; + } + + adapter = edma_id_to_adapter(id); + if (adapter == NULL) + return -EINVAL; + + ret = dma_adapter_cap_check(adapter); + if (ret) + return ret; + + rte_spinlock_lock(&adapter->lock); + adapter->max_nb = params->max_nb; + rte_spinlock_unlock(&adapter->lock); + + return 0; +} + +int +rte_event_dma_adapter_runtime_params_get(uint8_t id, + struct rte_event_dma_adapter_runtime_params *params) +{ + struct event_dma_adapter *adapter; + int ret; + + EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + if (params == NULL) { + RTE_EDEV_LOG_ERR("params pointer is NULL\n"); + return -EINVAL; + } + + adapter = edma_id_to_adapter(id); + if (adapter == NULL) + return -EINVAL; + + ret = dma_adapter_cap_check(adapter); + if (ret) + return ret; + + params->max_nb = adapter->max_nb; + + return 0; +} + +int +rte_event_dma_adapter_stats_get(uint8_t id, struct rte_event_dma_adapter_stats *stats) +{ + struct rte_event_dma_adapter_stats dev_stats_sum = {0}; + struct rte_event_dma_adapter_stats dev_stats; + struct event_dma_adapter *adapter; + struct dma_device_info *dev_info; + struct rte_eventdev *dev; + uint16_t num_dma_dev; + uint32_t i; + int ret; + + EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + adapter = edma_id_to_adapter(id); + if (adapter == NULL || stats == NULL) + return -EINVAL; + + num_dma_dev = rte_dma_count_avail(); + dev = &rte_eventdevs[adapter->eventdev_id]; + memset(stats, 0, sizeof(*stats)); + for (i = 0; i < num_dma_dev; i++) { + dev_info = &adapter->dma_devs[i]; + + if (dev_info->internal_event_port == 0 || + dev->dev_ops->dma_adapter_stats_get == NULL) + continue; + + ret = (*dev->dev_ops->dma_adapter_stats_get)(dev, i, &dev_stats); + if (ret) + continue; + + dev_stats_sum.dma_deq_count += dev_stats.dma_deq_count; + dev_stats_sum.event_enq_count += dev_stats.event_enq_count; + } + + if (adapter->service_initialized) 
+ *stats = adapter->dma_stats; + + stats->dma_deq_count += dev_stats_sum.dma_deq_count; + stats->event_enq_count += dev_stats_sum.event_enq_count; + + return 0; +} + +int +rte_event_dma_adapter_stats_reset(uint8_t id) +{ + struct event_dma_adapter *adapter; + struct dma_device_info *dev_info; + struct rte_eventdev *dev; + uint16_t num_dma_dev; + uint32_t i; + + EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + adapter = edma_id_to_adapter(id); + if (adapter == NULL) + return -EINVAL; + + num_dma_dev = rte_dma_count_avail(); + dev = &rte_eventdevs[adapter->eventdev_id]; + for (i = 0; i < num_dma_dev; i++) { + dev_info = &adapter->dma_devs[i]; + + if (dev_info->internal_event_port == 0 || + dev->dev_ops->dma_adapter_stats_reset == NULL) + continue; + + (*dev->dev_ops->dma_adapter_stats_reset)(dev, i); + } + + memset(&adapter->dma_stats, 0, sizeof(adapter->dma_stats)); + + return 0; +} + +uint16_t +rte_event_dma_adapter_enqueue(uint8_t dev_id, uint8_t port_id, struct rte_event ev[], + uint16_t nb_events) +{ + const struct rte_event_fp_ops *fp_ops; + void *port; + + fp_ops = &rte_event_fp_ops[dev_id]; + port = fp_ops->data[port_id]; + + return fp_ops->dma_enqueue(port, ev, nb_events); +} diff --git a/lib/eventdev/rte_event_dma_adapter.h b/lib/eventdev/rte_event_dma_adapter.h new file mode 100644 index 00000000000..e924ab673df --- /dev/null +++ b/lib/eventdev/rte_event_dma_adapter.h @@ -0,0 +1,581 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2023 Marvell. + */ + +#ifndef RTE_EVENT_DMA_ADAPTER +#define RTE_EVENT_DMA_ADAPTER + +/** + * @file rte_event_dma_adapter.h + * + * @warning + * @b EXPERIMENTAL: + * All functions in this file may be changed or removed without prior notice. + * + * DMA Event Adapter API. + * + * Eventdev library provides adapters to bridge between various components for providing new + * event source. The event DMA adapter is one of those adapters which is intended to bridge + * between event devices and DMA devices. + * + * The DMA adapter adds support to enqueue / dequeue DMA operations to / from event device. The + * packet flow between DMA device and the event device can be accomplished using both SW and HW + * based transfer mechanisms. The adapter uses an EAL service core function for SW based packet + * transfer and uses the eventdev PMD functions to configure HW based packet transfer between the + * DMA device and the event device. + * + * The application can choose to submit a DMA operation directly to an DMA device or send it to the + * DMA adapter via eventdev based on RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability. The + * first mode is known as the event new (RTE_EVENT_DMA_ADAPTER_OP_NEW) mode and the second as the + * event forward (RTE_EVENT_DMA_ADAPTER_OP_FORWARD) mode. The choice of mode can be specified while + * creating the adapter. In the former mode, it is an application responsibility to enable ingress + * packet ordering. In the latter mode, it is the adapter responsibility to enable the ingress + * packet ordering. 
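For illustration only (not part of this patch): a minimal sketch of selecting the adapter mode at creation time. The adapter identifier, event device identifier and port configuration values below are placeholder assumptions.

#include <rte_eventdev.h>
#include <rte_event_dma_adapter.h>

#define MY_DMA_ADAPTER_ID 0	/* illustrative adapter id */
#define MY_EVDEV_ID       0	/* illustrative event device id */

static int
setup_dma_adapter(void)
{
	/* Placeholder port configuration; real values depend on the eventdev. */
	struct rte_event_port_conf port_conf = {
		.new_event_threshold = 4096,
		.dequeue_depth = 128,
		.enqueue_depth = 128,
	};

	/* The mode passed here decides whether the application (OP_NEW) or the
	 * adapter (OP_FORWARD) is responsible for ingress ordering.
	 */
	return rte_event_dma_adapter_create(MY_DMA_ADAPTER_ID, MY_EVDEV_ID,
					    &port_conf,
					    RTE_EVENT_DMA_ADAPTER_OP_FORWARD);
}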
+ * + * + * Working model of RTE_EVENT_DMA_ADAPTER_OP_NEW mode: + * + * +--------------+ +--------------+ + * | | | DMA stage | + * | Application |---[2]-->| + enqueue to | + * | | | dmadev | + * +--------------+ +--------------+ + * ^ ^ | + * | | [3] + * [6] [1] | + * | | | + * +--------------+ | + * | | | + * | Event device | | + * | | | + * +--------------+ | + * ^ | + * | | + * [5] | + * | v + * +--------------+ +--------------+ + * | | | | + * | DMA adapter |<--[4]---| dmadev | + * | | | | + * +--------------+ +--------------+ + * + * + * [1] Application dequeues events from the previous stage. + * [2] Application prepares the DMA operations. + * [3] DMA operations are submitted to dmadev by application. + * [4] DMA adapter dequeues DMA completions from dmadev. + * [5] DMA adapter enqueues events to the eventdev. + * [6] Application dequeues from eventdev for further processing. + * + * In the RTE_EVENT_DMA_ADAPTER_OP_NEW mode, application submits DMA operations directly to DMA + * device. The DMA adapter then dequeues DMA completions from DMA device and enqueue events to the + * event device. This mode does not ensure ingress ordering, if the application directly enqueues + * to dmadev without going through DMA / atomic stage i.e. removing item [1] and [2]. + * + * Events dequeued from the adapter will be treated as new events. In this mode, application needs + * to specify event information (response information) which is needed to enqueue an event after the + * DMA operation is completed. + * + * + * Working model of RTE_EVENT_DMA_ADAPTER_OP_FORWARD mode: + * + * +--------------+ +--------------+ + * --[1]-->| |---[2]-->| Application | + * | Event device | | in | + * <--[8]--| |<--[3]---| Ordered stage| + * +--------------+ +--------------+ + * ^ | + * | [4] + * [7] | + * | v + * +----------------+ +--------------+ + * | |--[5]->| | + * | DMA adapter | | dmadev | + * | |<-[6]--| | + * +----------------+ +--------------+ + * + * + * [1] Events from the previous stage. + * [2] Application in ordered stage dequeues events from eventdev. + * [3] Application enqueues DMA operations as events to eventdev. + * [4] DMA adapter dequeues event from eventdev. + * [5] DMA adapter submits DMA operations to dmadev (Atomic stage). + * [6] DMA adapter dequeues DMA completions from dmadev + * [7] DMA adapter enqueues events to the eventdev + * [8] Events to the next stage + * + * In the event forward (RTE_EVENT_DMA_ADAPTER_OP_FORWARD) mode, if the HW supports the capability + * RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD, application can directly submit the DMA + * operations to the dmadev. If not, application retrieves the event port of the DMA adapter + * through the API, rte_event_DMA_adapter_event_port_get(). Then, links its event queue to this + * port and starts enqueuing DMA operations as events to the eventdev. The adapter then dequeues + * the events and submits the DMA operations to the dmadev. After the DMA completions, the adapter + * enqueues events to the event device. + * + * Application can use this mode, when ingress packet ordering is needed. Events dequeued from the + * adapter will be treated as forwarded events. In this mode, the application needs to specify the + * dmadev ID and queue pair ID (request information) needed to enqueue an DMA operation in addition + * to the event information (response information) needed to enqueue an event after the DMA + * operation has completed. 
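For illustration only (not part of this patch): a minimal sketch of preparing and submitting one OP_FORWARD request. It assumes each op mempool element is laid out as the op, then the response rte_event, then two SGEs, which matches the layout the SW adapter's enqueue path reads the response event from; the queue and port identifiers are placeholders.

#include <errno.h>
#include <string.h>

#include <rte_common.h>
#include <rte_dmadev.h>
#include <rte_eventdev.h>
#include <rte_event_dma_adapter.h>
#include <rte_mempool.h>

static int
submit_copy(struct rte_mempool *op_mp, uint8_t evdev_id, uint8_t app_port,
	    uint8_t dma_stage_queue, int16_t dma_dev_id, uint16_t vchan,
	    rte_iova_t src, rte_iova_t dst, uint32_t len)
{
	struct rte_event_dma_adapter_op *op;
	struct rte_event *rsp, ev;
	struct rte_dma_sge *sge;

	if (rte_mempool_get(op_mp, (void **)&op) < 0)
		return -ENOMEM;

	/* Assumed element layout: op | response rte_event | 2 x rte_dma_sge. */
	rsp = (struct rte_event *)(op + 1);
	sge = (struct rte_dma_sge *)(rsp + 1);

	/* Request information: which dmadev/vchan to use and what to copy. */
	sge[0].addr = src;
	sge[0].length = len;
	sge[1].addr = dst;
	sge[1].length = len;
	op->src_seg = &sge[0];
	op->dst_seg = &sge[1];
	op->nb_src = 1;
	op->nb_dst = 1;
	op->flags = 0;
	op->dma_dev_id = dma_dev_id;
	op->vchan = vchan;
	op->op_mp = op_mp;

	/* Response information: event the adapter enqueues on completion. */
	memset(rsp, 0, sizeof(*rsp));
	rsp->queue_id = dma_stage_queue + 1;	/* placeholder next stage */
	rsp->sched_type = RTE_SCHED_TYPE_ATOMIC;

	/* Submit the op as an event to the queue linked to the adapter port. */
	memset(&ev, 0, sizeof(ev));
	ev.queue_id = dma_stage_queue;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.op = RTE_EVENT_OP_NEW;
	ev.event_ptr = op;

	return rte_event_enqueue_burst(evdev_id, app_port, &ev, 1) == 1 ?
	       0 : -ENOSPC;
}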
+ * + * The event DMA adapter provides common APIs to configure the packet flow from the DMA device to + * event devices for both SW and HW based transfers. The DMA event adapter's functions are: + * + * - rte_event_dma_adapter_create_ext() + * - rte_event_dma_adapter_create() + * - rte_event_dma_adapter_free() + * - rte_event_dma_adapter_vchan_add() + * - rte_event_dma_adapter_vchan_del() + * - rte_event_dma_adapter_start() + * - rte_event_dma_adapter_stop() + * - rte_event_dma_adapter_stats_get() + * - rte_event_dma_adapter_stats_reset() + * + * The application creates an instance using rte_event_dma_adapter_create() or + * rte_event_dma_adapter_create_ext(). + * + * dmadev queue pair addition / deletion is done using the rte_event_dma_adapter_vchan_add() / + * rte_event_dma_adapter_vchan_del() APIs. If HW supports the capability + * RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND, event information must be passed to the + * add API. + * + */ + +#include + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * A structure used to hold event based DMA operation entry. All the information + * required for a DMA transfer shall be populated in "struct rte_event_dma_adapter_op" + * instance. + */ +struct rte_event_dma_adapter_op { + struct rte_dma_sge *src_seg; + /**< Source segments. */ + struct rte_dma_sge *dst_seg; + /**< Destination segments. */ + uint16_t nb_src; + /**< Number of source segments. */ + uint16_t nb_dst; + /**< Number of destination segments. */ + uint64_t flags; + /**< Flags related to the operation. + * @see RTE_DMA_OP_FLAG_* + */ + int16_t dma_dev_id; + /**< DMA device ID to be used */ + uint16_t vchan; + /**< DMA vchan ID to be used */ + struct rte_mempool *op_mp; + /**< Mempool from which op is allocated. */ +}; + +/** + * DMA event adapter mode + */ +enum rte_event_dma_adapter_mode { + RTE_EVENT_DMA_ADAPTER_OP_NEW, + /**< Start the DMA adapter in event new mode. + * @see RTE_EVENT_OP_NEW. + * + * Application submits DMA operations to the dmadev. Adapter only dequeues the DMA + * completions from dmadev and enqueue events to the eventdev. + */ + + RTE_EVENT_DMA_ADAPTER_OP_FORWARD, + /**< Start the DMA adapter in event forward mode. + * @see RTE_EVENT_OP_FORWARD. + * + * Application submits DMA requests as events to the DMA adapter or DMA device based on + * RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability. DMA completions are enqueued + * back to the eventdev by DMA adapter. + */ +}; + +/** + * Adapter configuration structure that the adapter configuration callback function is expected to + * fill out. + * + * @see rte_event_dma_adapter_conf_cb + */ +struct rte_event_dma_adapter_conf { + uint8_t event_port_id; + /** < Event port identifier, the adapter enqueues events to this port and dequeues DMA + * request events in RTE_EVENT_DMA_ADAPTER_OP_FORWARD mode. + */ + + uint32_t max_nb; + /**< The adapter can return early if it has processed at least max_nb DMA ops. This isn't + * treated as a requirement; batching may cause the adapter to process more than max_nb DMA + * ops. + */ +}; + +/** + * Adapter runtime configuration parameters + */ +struct rte_event_dma_adapter_runtime_params { + uint32_t max_nb; + /**< The adapter can return early if it has processed at least max_nb DMA ops. This isn't + * treated as a requirement; batching may cause the adapter to process more than max_nb DMA + * ops. + * + * Callback function passed to rte_event_dma_adapter_create_ext() configures the adapter + * with default value of max_nb. 
+ * rte_event_dma_adapter_runtime_params_set() allows to re-configure max_nb during runtime + * (after adding at least one queue pair) + * + * This is valid for the devices without RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD or + * RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW capability. + */ + + uint32_t rsvd[15]; + /**< Reserved fields for future expansion */ +}; + +/** + * Function type used for adapter configuration callback. The callback is used to fill in members of + * the struct rte_event_dma_adapter_conf, this callback is invoked when creating a SW service for + * packet transfer from dmadev vchan to the event device. The SW service is created within the + * function, rte_event_dma_adapter_vchan_add(), if SW based packet transfers from dmadev vchan + * to the event device are required. + * + * @param id + * Adapter identifier. + * @param evdev_id + * Event device identifier. + * @param conf + * Structure that needs to be populated by this callback. + * @param arg + * Argument to the callback. This is the same as the conf_arg passed to the + * rte_event_dma_adapter_create_ext(). + */ +typedef int (*rte_event_dma_adapter_conf_cb)(uint8_t id, uint8_t evdev_id, + struct rte_event_dma_adapter_conf *conf, void *arg); + +/** + * A structure used to retrieve statistics for an event DMA adapter instance. + */ +struct rte_event_dma_adapter_stats { + uint64_t event_poll_count; + /**< Event port poll count */ + + uint64_t event_deq_count; + /**< Event dequeue count */ + + uint64_t dma_enq_count; + /**< dmadev enqueue count */ + + uint64_t dma_enq_fail_count; + /**< dmadev enqueue failed count */ + + uint64_t dma_deq_count; + /**< dmadev dequeue count */ + + uint64_t event_enq_count; + /**< Event enqueue count */ + + uint64_t event_enq_retry_count; + /**< Event enqueue retry count */ + + uint64_t event_enq_fail_count; + /**< Event enqueue fail count */ +}; + +/** + * Create a new event DMA adapter with the specified identifier. + * + * @param id + * Adapter identifier. + * @param evdev_id + * Event device identifier. + * @param conf_cb + * Callback function that fills in members of a struct rte_event_dma_adapter_conf struct passed + * into it. + * @param mode + * Flag to indicate the mode of the adapter. + * @see rte_event_dma_adapter_mode + * @param conf_arg + * Argument that is passed to the conf_cb function. + * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_dma_adapter_create_ext(uint8_t id, uint8_t evdev_id, + rte_event_dma_adapter_conf_cb conf_cb, + enum rte_event_dma_adapter_mode mode, void *conf_arg); + +/** + * Create a new event DMA adapter with the specified identifier. This function uses an internal + * configuration function that creates an event port. This default function reconfigures the event + * device with an additional event port and set up the event port using the port_config parameter + * passed into this function. In case the application needs more control in configuration of the + * service, it should use the rte_event_dma_adapter_create_ext() version. + * + * @param id + * Adapter identifier. + * @param evdev_id + * Event device identifier. + * @param port_config + * Argument of type *rte_event_port_conf* that is passed to the conf_cb function. + * @param mode + * Flag to indicate the mode of the adapter. 
+ * @see rte_event_dma_adapter_mode + * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_dma_adapter_create(uint8_t id, uint8_t evdev_id, + struct rte_event_port_conf *port_config, + enum rte_event_dma_adapter_mode mode); + +/** + * Free an event DMA adapter + * + * @param id + * Adapter identifier. + * @return + * - 0: Success + * - <0: Error code on failure, If the adapter still has queue pairs added to it, the function + * returns -EBUSY. + */ +__rte_experimental +int rte_event_dma_adapter_free(uint8_t id); + +/** + * Retrieve the event port of an adapter. + * + * @param id + * Adapter identifier. + * + * @param [out] event_port_id + * Application links its event queue to this adapter port which is used in + * RTE_EVENT_DMA_ADAPTER_OP_FORWARD mode. + * + * @return + * - 0: Success + * - <0: Error code on failure. + */ +__rte_experimental +int rte_event_dma_adapter_event_port_get(uint8_t id, uint8_t *event_port_id); + +/** + * Add a vchan to an event DMA adapter. + * + * @param id + * Adapter identifier. + * @param dmadev_id + * dmadev identifier. + * @param vchan + * DMA device vchan identifier. If vchan is set -1, adapter adds all the + * preconfigured vchan to the instance. + * @param event + * If HW supports dmadev vchan to event queue binding, application is expected to fill in + * event information, else it will be NULL. + * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND + * + * @return + * - 0: Success, vchan added correctly. + * - <0: Error code on failure. + */ +__rte_experimental +int rte_event_dma_adapter_vchan_add(uint8_t id, int16_t dmadev_id, uint16_t vchan, + const struct rte_event *event); + +/** + * Delete a vchan from an event DMA adapter. + * + * @param id + * Adapter identifier. + * @param dmadev_id + * DMA device identifier. + * @param vchan + * DMA device vchan identifier. + * + * @return + * - 0: Success, vchan deleted successfully. + * - <0: Error code on failure. + */ +__rte_experimental +int rte_event_dma_adapter_vchan_del(uint8_t id, int16_t dmadev_id, uint16_t vchan); + +/** + * Retrieve the service ID of an adapter. If the adapter doesn't use a rte_service function, this + * function returns -ESRCH. + * + * @param id + * Adapter identifier. + * @param [out] service_id + * A pointer to a uint32_t, to be filled in with the service id. + * + * @return + * - 0: Success + * - <0: Error code on failure, if the adapter doesn't use a rte_service function, this function + * returns -ESRCH. + */ +__rte_experimental +int rte_event_dma_adapter_service_id_get(uint8_t id, uint32_t *service_id); + +/** + * Start event DMA adapter + * + * @param id + * Adapter identifier. + * + * @return + * - 0: Success, adapter started successfully. + * - <0: Error code on failure. + * + * @note The eventdev and dmadev to which the event_dma_adapter is connected should be started + * before calling rte_event_dma_adapter_start(). + */ +__rte_experimental +int rte_event_dma_adapter_start(uint8_t id); + +/** + * Stop event DMA adapter + * + * @param id + * Adapter identifier. + * + * @return + * - 0: Success, adapter stopped successfully. + * - <0: Error code on failure. 
+ */ +__rte_experimental +int rte_event_dma_adapter_stop(uint8_t id); + +/** + * Initialize the adapter runtime configuration parameters + * + * @param params + * A pointer to structure of type struct rte_event_dma_adapter_runtime_params + * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_dma_adapter_runtime_params_init(struct rte_event_dma_adapter_runtime_params *params); + +/** + * Set the adapter runtime configuration parameters + * + * @param id + * Adapter identifier + * + * @param params + * A pointer to structure of type struct rte_event_dma_adapter_runtime_params with configuration + * parameter values. The reserved fields of this structure must be initialized to zero and the valid + * fields need to be set appropriately. This struct can be initialized using + * rte_event_dma_adapter_runtime_params_init() API to default values or application may reset this + * struct and update required fields. + * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_dma_adapter_runtime_params_set(uint8_t id, + struct rte_event_dma_adapter_runtime_params *params); + +/** + * Get the adapter runtime configuration parameters + * + * @param id + * Adapter identifier + * + * @param[out] params + * A pointer to structure of type struct rte_event_dma_adapter_runtime_params containing valid + * adapter parameters when return value is 0. + * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_dma_adapter_runtime_params_get(uint8_t id, + struct rte_event_dma_adapter_runtime_params *params); + +/** + * Retrieve statistics for an adapter + * + * @param id + * Adapter identifier. + * @param [out] stats + * A pointer to structure used to retrieve statistics for an adapter. + * + * @return + * - 0: Success, retrieved successfully. + * - <0: Error code on failure. + */ +__rte_experimental +int rte_event_dma_adapter_stats_get(uint8_t id, struct rte_event_dma_adapter_stats *stats); + +/** + * Reset statistics for an adapter. + * + * @param id + * Adapter identifier. + * + * @return + * - 0: Success, statistics reset successfully. + * - <0: Error code on failure. + */ +__rte_experimental +int rte_event_dma_adapter_stats_reset(uint8_t id); + +/** + * Enqueue a burst of DMA operations as event objects supplied in *rte_event* structure on an event + * DMA adapter designated by its event *evdev_id* through the event port specified by *port_id*. + * This function is supported if the eventdev PMD has the + * #RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability flag set. + * + * The *nb_events* parameter is the number of event objects to enqueue that are supplied in the + * *ev* array of *rte_event* structure. + * + * The rte_event_dma_adapter_enqueue() function returns the number of event objects it actually + * enqueued. A return value equal to *nb_events* means that all event objects have been enqueued. + * + * @param evdev_id + * The identifier of the device. + * @param port_id + * The identifier of the event port. + * @param ev + * Points to an array of *nb_events* objects of type *rte_event* structure which contain the + * event object enqueue operations to be processed. + * @param nb_events + * The number of event objects to enqueue, typically number of + * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...) available for this port. + * + * @return + * The number of event objects actually enqueued on the event device. 
The return value can be + * less than the value of the *nb_events* parameter when the event devices queue is full or if + * invalid parameters are specified in a *rte_event*. If the return value is less than *nb_events*, + * the remaining events at the end of ev[] are not consumed and the caller has to take care of them, + * and rte_errno is set accordingly. Possible errno values include: + * - EINVAL: The port ID is invalid, device ID is invalid, an event's queue ID is invalid, or an + * event's sched type doesn't match the capabilities of the destination queue. + * - ENOSPC: The event port was backpressured and unable to enqueue one or more events. This + * error code is only applicable to closed systems. + */ +__rte_experimental +uint16_t rte_event_dma_adapter_enqueue(uint8_t evdev_id, uint8_t port_id, struct rte_event ev[], + uint16_t nb_events); + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_EVENT_DMA_ADAPTER */ diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 5c93967512e..6db03adf046 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -4,12 +4,14 @@ */ #include #include -#if defined(LINUX) +#include +#if defined(__linux__) #include #endif #include #include +#include #include #include #include @@ -165,7 +167,7 @@ struct event_eth_rx_adapter { /* Count of interrupt vectors in use */ uint32_t num_intr_vec; /* Thread blocked on Rx interrupts */ - pthread_t rx_intr_thread; + rte_thread_t rx_intr_thread; /* Configuration callback for rte_service configuration */ rte_event_eth_rx_adapter_conf_cb conf_cb; /* Configuration callback argument */ @@ -1154,13 +1156,13 @@ rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter, rte_spinlock_unlock(&rx_adapter->intr_ring_lock); } -/* pthread callback handling interrupt mode receive queues +/* thread callback handling interrupt mode receive queues * After receiving an Rx interrupt, it enqueues the port id and queue id of the * interrupting queue to the adapter's ring buffer for interrupt events. * These events are picked up by rxa_intr_ring_dequeue() which is invoked from * the adapter service function. */ -static void * +static uint32_t rxa_intr_thread(void *arg) { struct event_eth_rx_adapter *rx_adapter = arg; @@ -1179,7 +1181,7 @@ rxa_intr_thread(void *arg) } } - return NULL; + return 0; } /* Dequeue from interrupt ring and enqueue received @@ -1565,11 +1567,11 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id, static int rxa_epoll_create1(void) { -#if defined(LINUX) +#if defined(__linux__) int fd; fd = epoll_create1(EPOLL_CLOEXEC); return fd < 0 ? 
-errno : fd; -#elif defined(BSD) +#else return -ENOTSUP; #endif } @@ -1595,7 +1597,7 @@ static int rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter) { int err; - char thread_name[RTE_MAX_THREAD_NAME_LEN]; + char thread_name[RTE_THREAD_INTERNAL_NAME_SIZE]; if (rx_adapter->intr_ring) return 0; @@ -1618,11 +1620,11 @@ rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter) rte_spinlock_init(&rx_adapter->intr_ring_lock); - snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, - "rx-intr-thread-%d", rx_adapter->id); + snprintf(thread_name, sizeof(thread_name), + "evt-rx%d", rx_adapter->id); - err = rte_ctrl_thread_create(&rx_adapter->rx_intr_thread, thread_name, - NULL, rxa_intr_thread, rx_adapter); + err = rte_thread_create_internal_control(&rx_adapter->rx_intr_thread, + thread_name, rxa_intr_thread, rx_adapter); if (!err) return 0; @@ -1640,12 +1642,12 @@ rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter) { int err; - err = pthread_cancel(rx_adapter->rx_intr_thread); + err = pthread_cancel((pthread_t)rx_adapter->rx_intr_thread.opaque_id); if (err) RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n", err); - err = pthread_join(rx_adapter->rx_intr_thread, NULL); + err = rte_thread_join(rx_adapter->rx_intr_thread, NULL); if (err) RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err); @@ -1910,6 +1912,13 @@ rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id) if (rx_adapter->service_inited) return 0; + if (rte_mbuf_dyn_rx_timestamp_register( + &event_eth_rx_timestamp_dynfield_offset, + &event_eth_rx_timestamp_dynflag) != 0) { + RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n"); + return -rte_errno; + } + memset(&service, 0, sizeof(service)); snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN, "rte_event_eth_rx_adapter_%d", id); @@ -2472,18 +2481,45 @@ rxa_create(uint8_t id, uint8_t dev_id, if (conf_cb == rxa_default_conf_cb) rx_adapter->default_cb_arg = 1; - if (rte_mbuf_dyn_rx_timestamp_register( - &event_eth_rx_timestamp_dynfield_offset, - &event_eth_rx_timestamp_dynflag) != 0) { - RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n"); - return -rte_errno; - } - rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb, conf_arg); return 0; } +static int +rxa_config_params_validate(struct rte_event_eth_rx_adapter_params *rxa_params, + struct rte_event_eth_rx_adapter_params *temp_params) +{ + if (rxa_params == NULL) { + /* use default values if rxa_params is NULL */ + temp_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; + temp_params->use_queue_event_buf = false; + return 0; + } else if (!rxa_params->use_queue_event_buf && + rxa_params->event_buf_size == 0) { + RTE_EDEV_LOG_ERR("event buffer size can't be zero\n"); + return -EINVAL; + } else if (rxa_params->use_queue_event_buf && + rxa_params->event_buf_size != 0) { + RTE_EDEV_LOG_ERR("event buffer size needs to be configured " + "as part of queue add\n"); + return -EINVAL; + } + + *temp_params = *rxa_params; + /* adjust event buff size with BATCH_SIZE used for fetching + * packets from NIC rx queues to get full buffer utilization + * and prevent unnecessary rollovers. 
+ */ + if (!temp_params->use_queue_event_buf) { + temp_params->event_buf_size = + RTE_ALIGN(temp_params->event_buf_size, BATCH_SIZE); + temp_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); + } + + return 0; +} + int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, rte_event_eth_rx_adapter_conf_cb conf_cb, @@ -2510,27 +2546,9 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; - if (rxa_params == NULL) { - /* use default values if rxa_params is NULL */ - rxa_params = &temp_params; - rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; - rxa_params->use_queue_event_buf = false; - } else if ((!rxa_params->use_queue_event_buf && - rxa_params->event_buf_size == 0) || - (rxa_params->use_queue_event_buf && - rxa_params->event_buf_size != 0)) { - RTE_EDEV_LOG_ERR("Invalid adapter params\n"); - return -EINVAL; - } else if (!rxa_params->use_queue_event_buf) { - /* adjust event buff size with BATCH_SIZE used for fetching - * packets from NIC rx queues to get full buffer utilization - * and prevent unnecessary rollovers. - */ - - rxa_params->event_buf_size = - RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); - rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); - } + ret = rxa_config_params_validate(rxa_params, &temp_params); + if (ret != 0) + return ret; pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) @@ -2538,7 +2556,7 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, *pc = *port_config; - ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); + ret = rxa_create(id, dev_id, &temp_params, rxa_default_conf_cb, pc); if (ret) rte_free(pc); @@ -2548,6 +2566,22 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, return ret; } +int +rte_event_eth_rx_adapter_create_ext_with_params(uint8_t id, uint8_t dev_id, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg, + struct rte_event_eth_rx_adapter_params *rxa_params) +{ + struct rte_event_eth_rx_adapter_params temp_params = {0}; + int ret; + + ret = rxa_config_params_validate(rxa_params, &temp_params); + if (ret != 0) + return ret; + + return rxa_create(id, dev_id, &temp_params, conf_cb, conf_arg); +} + int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config) diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index fe2a6bdd2cd..842581e87e0 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -27,6 +27,7 @@ * - rte_event_eth_rx_adapter_create_ext() * - rte_event_eth_rx_adapter_create() * - rte_event_eth_rx_adapter_create_with_params() + * - rte_event_eth_rx_adapter_create_ext_with_params() * - rte_event_eth_rx_adapter_free() * - rte_event_eth_rx_adapter_queue_add() * - rte_event_eth_rx_adapter_queue_del() @@ -45,7 +46,8 @@ * * The application creates an ethernet to event adapter using * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create() - * or rte_event_eth_rx_adapter_create_with_params() functions. + * or rte_event_eth_rx_adapter_create_with_params() or + * rte_event_eth_rx_adapter_create_ext_with_params() functions. 
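For illustration only (not part of this patch): a minimal sketch of the new rte_event_eth_rx_adapter_create_ext_with_params() call. The callback my_rxa_conf_cb, the event buffer size and the reuse of a pre-configured event port are application-side assumptions.

#include <stdbool.h>

#include <rte_common.h>
#include <rte_event_eth_rx_adapter.h>

/* Hypothetical application callback: reuse a port the app already set up. */
static int
my_rxa_conf_cb(uint8_t id, uint8_t evdev_id,
	       struct rte_event_eth_rx_adapter_conf *conf, void *arg)
{
	RTE_SET_USED(id);
	RTE_SET_USED(evdev_id);

	conf->event_port_id = *(uint8_t *)arg;	/* pre-configured event port */
	conf->max_nb_rx = 256;			/* illustrative budget */
	return 0;
}

static int
create_rx_adapter(uint8_t rxa_id, uint8_t evdev_id, uint8_t *rx_port_id)
{
	struct rte_event_eth_rx_adapter_params params = {
		/* A non-zero size is required when the shared buffer is used. */
		.event_buf_size = 1024,
		.use_queue_event_buf = false,
	};

	return rte_event_eth_rx_adapter_create_ext_with_params(rxa_id, evdev_id,
								my_rxa_conf_cb,
								rx_port_id,
								&params);
}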
* * The adapter needs to know which ethernet rx queues to poll for mbufs as well * as event device parameters such as the event queue identifier, event @@ -469,6 +471,39 @@ int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config, struct rte_event_eth_rx_adapter_params *rxa_params); +/** + * This is a variant of rte_event_eth_rx_adapter_create_ext() with additional + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. + * + * @param id + * The identifier of the ethernet Rx event adapter. + * + * @param dev_id + * The identifier of the event device to configure. + * + * @param conf_cb + * Callback function that fills in members of a + * struct rte_event_eth_rx_adapter_conf struct passed into + * it. + * + * @param conf_arg + * Argument that is passed to the conf_cb function. + * + * @param rxa_params + * Pointer to struct rte_event_eth_rx_adapter_params. + * In case of NULL, default values are used. + * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int +rte_event_eth_rx_adapter_create_ext_with_params(uint8_t id, uint8_t dev_id, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg, + struct rte_event_eth_rx_adapter_params *rxa_params); + /** * Free an event adapter * diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c index 6ab45243326..95373bbaad6 100644 --- a/lib/eventdev/rte_eventdev.c +++ b/lib/eventdev/rte_eventdev.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -224,6 +225,28 @@ rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, : 0; } +int +rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dma_dev_id, uint32_t *caps) +{ + struct rte_eventdev *dev; + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + if (!rte_dma_is_valid(dma_dev_id)) + return -EINVAL; + + dev = &rte_eventdevs[dev_id]; + + if (caps == NULL) + return -EINVAL; + + *caps = 0; + + if (dev->dev_ops->dma_adapter_caps_get) + return (*dev->dev_ops->dma_adapter_caps_get)(dev, dma_dev_id, caps); + + return 0; +} + static inline int event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues) { @@ -270,7 +293,7 @@ event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports) void **ports; uint16_t *links_map; struct rte_event_port_conf *ports_cfg; - unsigned int i; + unsigned int i, j; RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports, dev->data->dev_id); @@ -281,7 +304,6 @@ event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports) ports = dev->data->ports; ports_cfg = dev->data->ports_cfg; - links_map = dev->data->links_map; for (i = nb_ports; i < old_nb_ports; i++) (*dev->dev_ops->port_release)(ports[i]); @@ -297,9 +319,11 @@ event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports) sizeof(ports[0]) * new_ps); memset(ports_cfg + old_nb_ports, 0, sizeof(ports_cfg[0]) * new_ps); - for (i = old_links_map_end; i < links_map_end; i++) - links_map[i] = - EVENT_QUEUE_SERVICE_PRIORITY_INVALID; + for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++) { + links_map = dev->data->links_map[i]; + for (j = old_links_map_end; j < links_map_end; j++) + links_map[j] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID; + } } } else { if (*dev->dev_ops->port_release == NULL) @@ -953,21 +977,45 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id, const uint8_t queues[], const uint8_t priorities[], uint16_t nb_links) { - struct rte_eventdev *dev; - uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV]; + return 
rte_event_port_profile_links_set(dev_id, port_id, queues, priorities, nb_links, 0); +} + +int +rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[], + const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id) +{ uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV]; + uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV]; + struct rte_event_dev_info info; + struct rte_eventdev *dev; uint16_t *links_map; int i, diag; RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0); dev = &rte_eventdevs[dev_id]; + if (*dev->dev_ops->dev_infos_get == NULL) + return -ENOTSUP; + + (*dev->dev_ops->dev_infos_get)(dev, &info); + if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT || + profile_id >= info.max_profiles_per_port) { + RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id); + return -EINVAL; + } + if (*dev->dev_ops->port_link == NULL) { RTE_EDEV_LOG_ERR("Function not supported\n"); rte_errno = ENOTSUP; return 0; } + if (profile_id && *dev->dev_ops->port_link_profile == NULL) { + RTE_EDEV_LOG_ERR("Function not supported\n"); + rte_errno = ENOTSUP; + return 0; + } + if (!is_valid_port(dev, port_id)) { RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); rte_errno = EINVAL; @@ -995,18 +1043,22 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id, return 0; } - diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id], - queues, priorities, nb_links); + if (profile_id) + diag = (*dev->dev_ops->port_link_profile)(dev, dev->data->ports[port_id], queues, + priorities, nb_links, profile_id); + else + diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id], queues, + priorities, nb_links); if (diag < 0) return diag; - links_map = dev->data->links_map; + links_map = dev->data->links_map[profile_id]; /* Point links_map to this port specific area */ links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV); for (i = 0; i < diag; i++) links_map[queues[i]] = (uint8_t)priorities[i]; - rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag); + rte_eventdev_trace_port_profile_links_set(dev_id, port_id, nb_links, profile_id, diag); return diag; } @@ -1014,27 +1066,51 @@ int rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[], uint16_t nb_unlinks) { - struct rte_eventdev *dev; + return rte_event_port_profile_unlink(dev_id, port_id, queues, nb_unlinks, 0); +} + +int +rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[], + uint16_t nb_unlinks, uint8_t profile_id) +{ uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV]; - int i, diag, j; + struct rte_event_dev_info info; + struct rte_eventdev *dev; uint16_t *links_map; + int i, diag, j; RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0); dev = &rte_eventdevs[dev_id]; + if (*dev->dev_ops->dev_infos_get == NULL) + return -ENOTSUP; + + (*dev->dev_ops->dev_infos_get)(dev, &info); + if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT || + profile_id >= info.max_profiles_per_port) { + RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id); + return -EINVAL; + } + if (*dev->dev_ops->port_unlink == NULL) { RTE_EDEV_LOG_ERR("Function not supported"); rte_errno = ENOTSUP; return 0; } + if (profile_id && *dev->dev_ops->port_unlink_profile == NULL) { + RTE_EDEV_LOG_ERR("Function not supported"); + rte_errno = ENOTSUP; + return 0; + } + if (!is_valid_port(dev, port_id)) { RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); rte_errno = EINVAL; return 0; } - links_map = dev->data->links_map; + links_map = dev->data->links_map[profile_id]; /* Point links_map to this 
port specific area */ links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV); @@ -1063,16 +1139,19 @@ rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, return 0; } - diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id], - queues, nb_unlinks); - + if (profile_id) + diag = (*dev->dev_ops->port_unlink_profile)(dev, dev->data->ports[port_id], queues, + nb_unlinks, profile_id); + else + diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id], queues, + nb_unlinks); if (diag < 0) return diag; for (i = 0; i < diag; i++) links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID; - rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag); + rte_eventdev_trace_port_profile_unlink(dev_id, port_id, nb_unlinks, profile_id, diag); return diag; } @@ -1116,7 +1195,8 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id, return -EINVAL; } - links_map = dev->data->links_map; + /* Use the default profile_id. */ + links_map = dev->data->links_map[0]; /* Point links_map to this port specific area */ links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV); for (i = 0; i < dev->data->nb_queues; i++) { @@ -1132,6 +1212,49 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id, return count; } +int +rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[], + uint8_t priorities[], uint8_t profile_id) +{ + struct rte_event_dev_info info; + struct rte_eventdev *dev; + uint16_t *links_map; + int i, count = 0; + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + + dev = &rte_eventdevs[dev_id]; + if (*dev->dev_ops->dev_infos_get == NULL) + return -ENOTSUP; + + (*dev->dev_ops->dev_infos_get)(dev, &info); + if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT || + profile_id >= info.max_profiles_per_port) { + RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id); + return -EINVAL; + } + + if (!is_valid_port(dev, port_id)) { + RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); + return -EINVAL; + } + + links_map = dev->data->links_map[profile_id]; + /* Point links_map to this port specific area */ + links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV); + for (i = 0; i < dev->data->nb_queues; i++) { + if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) { + queues[count] = i; + priorities[count] = (uint8_t)links_map[i]; + ++count; + } + } + + rte_eventdev_trace_port_profile_links_get(dev_id, port_id, profile_id, count); + + return count; +} + int rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns, uint64_t *timeout_ticks) @@ -1440,7 +1563,7 @@ eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data, { char mz_name[RTE_EVENTDEV_NAME_MAX_LEN]; const struct rte_memzone *mz; - int n; + int i, n; /* Generate memzone name */ n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id); @@ -1460,11 +1583,10 @@ eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data, *data = mz->addr; if (rte_eal_process_type() == RTE_PROC_PRIMARY) { memset(*data, 0, sizeof(struct rte_eventdev_data)); - for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV * - RTE_EVENT_MAX_QUEUES_PER_DEV; - n++) - (*data)->links_map[n] = - EVENT_QUEUE_SERVICE_PRIORITY_INVALID; + for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++) + for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV * RTE_EVENT_MAX_QUEUES_PER_DEV; + n++) + (*data)->links_map[i][n] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID; } return 0; diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h index 2ba8a7b0903..2ea98302b89 100644 --- a/lib/eventdev/rte_eventdev.h +++ 
b/lib/eventdev/rte_eventdev.h @@ -320,6 +320,12 @@ struct rte_event; * rte_event_queue_setup(). */ +#define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12) +/**< Event device is capable of supporting multiple link profiles per event port + * i.e., the value of `rte_event_dev_info::max_profiles_per_port` is greater + * than one. + */ + /* Event device priority levels */ #define RTE_EVENT_DEV_PRIORITY_HIGHEST 0 /**< Highest priority expressed across eventdev subsystem @@ -446,6 +452,10 @@ struct rte_event_dev_info { * device. These ports and queues are not accounted for in * max_event_ports or max_event_queues. */ + uint8_t max_profiles_per_port; + /**< Maximum number of event queue profiles per event port. + * A device that doesn't support multiple profiles will set this as 1. + */ }; /** @@ -1197,6 +1207,8 @@ struct rte_event_vector { */ #define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4 /**< The event generated from event eth Rx adapter */ +#define RTE_EVENT_TYPE_DMADEV 0x5 +/**< The event generated from dma subsystem */ #define RTE_EVENT_TYPE_VECTOR 0x8 /**< Indicates that event is a vector. * All vector event types should be a logical OR of EVENT_TYPE_VECTOR. @@ -1462,6 +1474,48 @@ int rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id, uint32_t *caps); +/* DMA adapter capability bitmap flag */ +#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1 +/**< Flag indicates HW is capable of generating events in + * RTE_EVENT_OP_NEW enqueue operation. DMADEV will send + * packets to the event device as new events using an + * internal event port. + */ + +#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2 +/**< Flag indicates HW is capable of generating events in + * RTE_EVENT_OP_FORWARD enqueue operation. DMADEV will send + * packets to the event device as forwarded event using an + * internal event port. + */ + +#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4 +/**< Flag indicates HW is capable of mapping DMA vchan to event queue. */ + +/** + * Retrieve the event device's DMA adapter capabilities for the + * specified dmadev device + * + * @param dev_id + * The identifier of the device. + * + * @param dmadev_id + * The identifier of the dmadev device. + * + * @param[out] caps + * A pointer to memory filled with event adapter capabilities. + * It is expected to be pre-allocated & initialized by caller. + * + * @return + * - 0: Success, driver provides event adapter capabilities for the + * dmadev device. + * - <0: Error code returned by the driver function. + * + */ +__rte_experimental +int +rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps); + /* Ethdev Tx adapter capability bitmap flags */ #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1 /**< This flag is sent when the PMD supports a packet transmit callback @@ -1536,6 +1590,10 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns, * latency of critical work by establishing the link with more event ports * at runtime. * + * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater + * than or equal to one, this function links the event queues to the default + * profile_id i.e. profile_id 0 of the event port. + * * @param dev_id * The identifier of the device. * @@ -1593,6 +1651,10 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id, * Event queue(s) to event port unlink establishment can be changed at runtime * without re-configuring the device. 
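
The profile APIs added to rte_eventdev.h by this patch are typically used
together; their full prototypes appear further down in this file. The fragment
below is an illustrative sketch only (dev_id, port_id, the queue identifiers
and the choice of two profiles are assumptions): it probes for multi-profile
support, programs two link sets and switches between them at runtime.

    uint8_t q_hi[] = {0, 1};        /* latency-critical queues */
    uint8_t q_lo[] = {2, 3, 4};     /* best-effort queues */
    struct rte_event_dev_info info;

    rte_event_dev_info_get(dev_id, &info);
    if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_PROFILE_LINK) ||
        info.max_profiles_per_port < 2)
            return;         /* fall back to plain rte_event_port_link() */

    /* Profile 0 carries the latency-critical links, profile 1 the
     * best-effort links. NULL priorities select
     * RTE_EVENT_DEV_PRIORITY_NORMAL.
     */
    rte_event_port_profile_links_set(dev_id, port_id, q_hi, NULL,
                                     RTE_DIM(q_hi), 0);
    rte_event_port_profile_links_set(dev_id, port_id, q_lo, NULL,
                                     RTE_DIM(q_lo), 1);

    /* In the datapath, change the active profile before dequeuing;
     * subsequent rte_event_dequeue_burst() calls only schedule from
     * queues 2-4 until profile 0 is restored.
     */
    rte_event_port_profile_switch(dev_id, port_id, 1);
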
* + * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater + * than or equal to one, this function unlinks the event queues from the default + * profile identifier i.e. profile 0 of the event port. + * * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks. * * @param dev_id @@ -1626,6 +1688,136 @@ int rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[], uint16_t nb_unlinks); +/** + * Link multiple source event queues supplied in *queues* to the destination + * event port designated by its *port_id* with associated profile identifier + * supplied in *profile_id* with service priorities supplied in *priorities* + * on the event device designated by its *dev_id*. + * + * If *profile_id* is set to 0 then, the links created by the call `rte_event_port_link` + * will be overwritten. + * + * Event ports by default use profile_id 0 unless it is changed using the + * call ``rte_event_port_profile_switch()``. + * + * The link establishment shall enable the event port *port_id* from + * receiving events from the specified event queue(s) supplied in *queues* + * + * An event queue may link to one or more event ports. + * The number of links can be established from an event queue to event port is + * implementation defined. + * + * Event queue(s) to event port link establishment can be changed at runtime + * without re-configuring the device to support scaling and to reduce the + * latency of critical work by establishing the link with more event ports + * at runtime. + * + * @param dev_id + * The identifier of the device. + * + * @param port_id + * Event port identifier to select the destination port to link. + * + * @param queues + * Points to an array of *nb_links* event queues to be linked + * to the event port. + * NULL value is allowed, in which case this function links all the configured + * event queues *nb_event_queues* which previously supplied to + * rte_event_dev_configure() to the event port *port_id* + * + * @param priorities + * Points to an array of *nb_links* service priorities associated with each + * event queue link to event port. + * The priority defines the event port's servicing priority for + * event queue, which may be ignored by an implementation. + * The requested priority should in the range of + * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST]. + * The implementation shall normalize the requested priority to + * implementation supported priority value. + * NULL value is allowed, in which case this function links the event queues + * with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority + * + * @param nb_links + * The number of links to establish. This parameter is ignored if queues is + * NULL. + * + * @param profile_id + * The profile identifier associated with the links between event queues and + * event port. Should be less than the max capability reported by + * ``rte_event_dev_info::max_profiles_per_port`` + * + * @return + * The number of links actually established. The return value can be less than + * the value of the *nb_links* parameter when the implementation has the + * limitation on specific queue to port link establishment or if invalid + * parameters are specified in *queues* + * If the return value is less than *nb_links*, the remaining links at the end + * of link[] are not established, and the caller has to take care of them. 
+ * If return value is less than *nb_links* then implementation shall update the + * rte_errno accordingly, Possible rte_errno values are + * (EDQUOT) Quota exceeded(Application tried to link the queue configured with + * RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event ports) + * (EINVAL) Invalid parameter + * + */ +__rte_experimental +int +rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[], + const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id); + +/** + * Unlink multiple source event queues supplied in *queues* that belong to profile + * designated by *profile_id* from the destination event port designated by its + * *port_id* on the event device designated by its *dev_id*. + * + * If *profile_id* is set to 0 i.e., the default profile then, then this function + * will act as ``rte_event_port_unlink``. + * + * The unlink call issues an async request to disable the event port *port_id* + * from receiving events from the specified event queue *queue_id*. + * Event queue(s) to event port unlink establishment can be changed at runtime + * without re-configuring the device. + * + * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks. + * + * @param dev_id + * The identifier of the device. + * + * @param port_id + * Event port identifier to select the destination port to unlink. + * + * @param queues + * Points to an array of *nb_unlinks* event queues to be unlinked + * from the event port. + * NULL value is allowed, in which case this function unlinks all the + * event queue(s) from the event port *port_id*. + * + * @param nb_unlinks + * The number of unlinks to establish. This parameter is ignored if queues is + * NULL. + * + * @param profile_id + * The profile identifier associated with the links between event queues and + * event port. Should be less than the max capability reported by + * ``rte_event_dev_info::max_profiles_per_port`` + * + * @return + * The number of unlinks successfully requested. The return value can be less + * than the value of the *nb_unlinks* parameter when the implementation has the + * limitation on specific queue to port unlink establishment or + * if invalid parameters are specified. + * If the return value is less than *nb_unlinks*, the remaining queues at the + * end of queues[] are not unlinked, and the caller has to take care of them. + * If return value is less than *nb_unlinks* then implementation shall update + * the rte_errno accordingly, Possible rte_errno values are + * (EINVAL) Invalid parameter + * + */ +__rte_experimental +int +rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[], + uint16_t nb_unlinks, uint8_t profile_id); + /** * Returns the number of unlinks in progress. * @@ -1680,6 +1872,42 @@ int rte_event_port_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[], uint8_t priorities[]); +/** + * Retrieve the list of source event queues and its service priority + * associated to a *profile_id* and linked to the destination event port + * designated by its *port_id* on the event device designated by its *dev_id*. + * + * @param dev_id + * The identifier of the device. + * + * @param port_id + * Event port identifier. + * + * @param[out] queues + * Points to an array of *queues* for output. + * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to + * store the event queue(s) linked with event port *port_id* + * + * @param[out] priorities + * Points to an array of *priorities* for output. 
+ * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to + * store the service priority associated with each event queue linked + * + * @param profile_id + * The profile identifier associated with the links between event queues and + * event port. Should be less than the max capability reported by + * ``rte_event_dev_info::max_profiles_per_port`` + * + * @return + * The number of links established on the event port designated by its + * *port_id*. + * - <0 on failure. + */ +__rte_experimental +int +rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[], + uint8_t priorities[], uint8_t profile_id); + /** * Retrieve the service ID of the event dev. If the adapter doesn't use * a rte_service function, this function returns -ESRCH. @@ -2265,6 +2493,53 @@ rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op) return 0; } +/** + * Change the active profile on an event port. + * + * This function is used to change the current active profile on an event port + * when multiple link profiles are configured on an event port through the + * function call ``rte_event_port_profile_links_set``. + * + * On the subsequent ``rte_event_dequeue_burst`` call, only the event queues + * that were associated with the newly active profile will participate in + * scheduling. + * + * @param dev_id + * The identifier of the device. + * @param port_id + * The identifier of the event port. + * @param profile_id + * The identifier of the profile. + * @return + * - 0 on success. + * - -EINVAL if *dev_id*, *port_id*, or *profile_id* is invalid. + */ +__rte_experimental +static inline uint8_t +rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id) +{ + const struct rte_event_fp_ops *fp_ops; + void *port; + + fp_ops = &rte_event_fp_ops[dev_id]; + port = fp_ops->data[port_id]; + +#ifdef RTE_LIBRTE_EVENTDEV_DEBUG + if (dev_id >= RTE_EVENT_MAX_DEVS || + port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) + return -EINVAL; + + if (port == NULL) + return -EINVAL; + + if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT) + return -EINVAL; +#endif + rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id); + + return fp_ops->profile_switch(port, profile_id); +} + #ifdef __cplusplus } #endif diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h index c328bdbc828..5b405518d1b 100644 --- a/lib/eventdev/rte_eventdev_core.h +++ b/lib/eventdev/rte_eventdev_core.h @@ -42,6 +42,13 @@ typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port, uint16_t nb_events); /**< @internal Enqueue burst of events on crypto adapter */ +typedef uint16_t (*event_dma_adapter_enqueue_t)(void *port, struct rte_event ev[], + uint16_t nb_events); +/**< @internal Enqueue burst of events on DMA adapter */ + +typedef int (*event_profile_switch_t)(void *port, uint8_t profile); +/**< @internal Switch active link profile on the event port. */ + struct rte_event_fp_ops { void **data; /**< points to array of internal port data pointers */ @@ -65,7 +72,11 @@ struct rte_event_fp_ops { /**< PMD Tx adapter enqueue same destination function. */ event_crypto_adapter_enqueue_t ca_enqueue; /**< PMD Crypto adapter enqueue function. */ - uintptr_t reserved[6]; + event_dma_adapter_enqueue_t dma_enqueue; + /**< PMD DMA adapter enqueue function. */ + event_profile_switch_t profile_switch; + /**< PMD Event switch profile function. 
*/ + uintptr_t reserved[4]; } __rte_cache_aligned; extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS]; diff --git a/lib/eventdev/rte_eventdev_trace_fp.h b/lib/eventdev/rte_eventdev_trace_fp.h index af2172d2a58..04d510ad007 100644 --- a/lib/eventdev/rte_eventdev_trace_fp.h +++ b/lib/eventdev/rte_eventdev_trace_fp.h @@ -46,6 +46,14 @@ RTE_TRACE_POINT_FP( rte_trace_point_emit_int(op); ) +RTE_TRACE_POINT_FP( + rte_eventdev_trace_port_profile_switch, + RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id, uint8_t profile), + rte_trace_point_emit_u8(dev_id); + rte_trace_point_emit_u8(port_id); + rte_trace_point_emit_u8(profile); +) + RTE_TRACE_POINT_FP( rte_eventdev_trace_eth_tx_adapter_enqueue, RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id, void *ev_table, diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index b03c10d99f6..42a2f7206e4 100644 --- a/lib/eventdev/version.map +++ b/lib/eventdev/version.map @@ -101,6 +101,7 @@ EXPERIMENTAL { global: # added in 21.11 + __rte_eventdev_trace_maintain; rte_event_eth_rx_adapter_create_with_params; rte_event_eth_rx_adapter_queue_conf_get; rte_event_eth_rx_adapter_queue_stats_get; @@ -131,6 +132,29 @@ EXPERIMENTAL { rte_event_eth_tx_adapter_runtime_params_init; rte_event_eth_tx_adapter_runtime_params_set; rte_event_timer_remaining_ticks_get; + + # added in 23.11 + rte_event_dma_adapter_caps_get; + rte_event_dma_adapter_create; + rte_event_dma_adapter_create_ext; + rte_event_dma_adapter_enqueue; + rte_event_dma_adapter_event_port_get; + rte_event_dma_adapter_free; + rte_event_dma_adapter_runtime_params_get; + rte_event_dma_adapter_runtime_params_init; + rte_event_dma_adapter_runtime_params_set; + rte_event_dma_adapter_service_id_get; + rte_event_dma_adapter_start; + rte_event_dma_adapter_stats_get; + rte_event_dma_adapter_stats_reset; + rte_event_dma_adapter_stop; + rte_event_dma_adapter_vchan_add; + rte_event_dma_adapter_vchan_del; + rte_event_eth_rx_adapter_create_ext_with_params; + rte_event_port_profile_links_set; + rte_event_port_profile_unlink; + rte_event_port_profile_links_get; + __rte_eventdev_trace_port_profile_switch; }; INTERNAL { diff --git a/lib/fib/dir24_8.c b/lib/fib/dir24_8.c index 3efdcb533c4..c739e923040 100644 --- a/lib/fib/dir24_8.c +++ b/lib/fib/dir24_8.c @@ -155,7 +155,7 @@ tbl8_get_idx(struct dir24_8_tbl *dp) (dp->tbl8_idxes[i] == UINT64_MAX); i++) ; if (i < (dp->number_tbl8s >> BITMAP_SLAB_BIT_SIZE_LOG2)) { - bit_idx = __builtin_ctzll(~dp->tbl8_idxes[i]); + bit_idx = rte_ctz64(~dp->tbl8_idxes[i]); dp->tbl8_idxes[i] |= (1ULL << bit_idx); return (i << BITMAP_SLAB_BIT_SIZE_LOG2) + bit_idx; } @@ -388,6 +388,12 @@ modify_fib(struct dir24_8_tbl *dp, struct rte_rib *rib, uint32_t ip, return ret; ledge = redge + (uint32_t)(1ULL << (32 - tmp_depth)); + /* + * we got to the end of address space + * and wrapped around + */ + if (ledge == 0) + break; } else { redge = ip + (uint32_t)(1ULL << (32 - depth)); if (ledge == redge && ledge != 0) diff --git a/lib/fib/trie.c b/lib/fib/trie.c index 3e780afdaf6..09470e7287c 100644 --- a/lib/fib/trie.c +++ b/lib/fib/trie.c @@ -451,6 +451,14 @@ get_nxt_net(uint8_t *ip, uint8_t depth) } } +static int +v6_addr_is_zero(const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE]) +{ + uint8_t ip_addr[RTE_FIB6_IPV6_ADDR_SIZE] = {0}; + + return rte_rib6_is_equal(ip, ip_addr); +} + static int modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib, const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE], @@ -484,11 +492,19 @@ modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib, return ret; 
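
To make the wrap-around guard added above concrete (illustrative arithmetic,
not taken from the patch): if modify_fib() finds a covered sub-route such as
255.255.255.255/32, then redge = 0xffffffff and the next left edge becomes
redge + (1 << (32 - 32)) = 0x100000000, which wraps to 0 in the 32-bit ledge
variable; without the new "if (ledge == 0) break;" the loop would start over
from the beginning of the address space instead of terminating. The trie.c
hunk applies the same guard for IPv6 by checking for the all-zero address
with v6_addr_is_zero().
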
get_nxt_net(redge, tmp_depth); rte_rib6_copy_addr(ledge, redge); + /* + * we got to the end of address space + * and wrapped around + */ + if (v6_addr_is_zero(ledge)) + break; } else { rte_rib6_copy_addr(redge, ip); get_nxt_net(redge, depth); - if (rte_rib6_is_equal(ledge, redge)) + if (rte_rib6_is_equal(ledge, redge) && + !v6_addr_is_zero(ledge)) break; + ret = install_to_dp(dp, ledge, redge, next_hop); if (ret != 0) diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c index d92a903bb39..19b23f2a97e 100644 --- a/lib/hash/rte_cuckoo_hash.c +++ b/lib/hash/rte_cuckoo_hash.c @@ -1868,11 +1868,15 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, _mm_load_si128( (__m128i const *)prim_bkt->sig_current), _mm_set1_epi16(sig))); + /* Extract the even-index bits only */ + *prim_hash_matches &= 0x5555; /* Compare all signatures in the bucket */ *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16( _mm_load_si128( (__m128i const *)sec_bkt->sig_current), _mm_set1_epi16(sig))); + /* Extract the even-index bits only */ + *sec_hash_matches &= 0x5555; break; #elif defined(__ARM_NEON) case RTE_HASH_COMPARE_NEON: { diff --git a/lib/hash/rte_thash.c b/lib/hash/rte_thash.c index 2228af576bb..4ff567ee5ad 100644 --- a/lib/hash/rte_thash.c +++ b/lib/hash/rte_thash.c @@ -130,7 +130,7 @@ get_bit_lfsr(struct thash_lfsr *lfsr) * masking the TAP bits defined by the polynomial and * calculating parity */ - bit = __builtin_popcount(lfsr->state & lfsr->poly) & 0x1; + bit = rte_popcount32(lfsr->state & lfsr->poly) & 0x1; ret = lfsr->state & 0x1; lfsr->state = ((lfsr->state >> 1) | (bit << (lfsr->deg - 1))) & ((1 << lfsr->deg) - 1); @@ -144,7 +144,7 @@ get_rev_bit_lfsr(struct thash_lfsr *lfsr) { uint32_t bit, ret; - bit = __builtin_popcount(lfsr->rev_state & lfsr->rev_poly) & 0x1; + bit = rte_popcount32(lfsr->rev_state & lfsr->rev_poly) & 0x1; ret = lfsr->rev_state & (1 << (lfsr->deg - 1)); lfsr->rev_state = ((lfsr->rev_state << 1) | bit) & ((1 << lfsr->deg) - 1); diff --git a/lib/hash/rte_thash_x86_gfni.h b/lib/hash/rte_thash_x86_gfni.h index 7bb76ac1bb5..b7c5a4ba7d8 100644 --- a/lib/hash/rte_thash_x86_gfni.h +++ b/lib/hash/rte_thash_x86_gfni.h @@ -12,6 +12,7 @@ * using Galois Fields New Instructions. */ +#include #include #include @@ -110,7 +111,7 @@ __rte_thash_gfni(const uint64_t *mtrx, const uint8_t *tuple, secondary_tuple); } - chunk_len = __builtin_popcountll(load_mask); + chunk_len = rte_popcount64(load_mask); for (i = 0; i < ((chunk_len + prepend) / 8); i++, mtrx += 8) { perm_bytes = _mm512_mask_permutexvar_epi8(perm_bytes, permute_mask, permute_idx, tuple_bytes); diff --git a/lib/log/log.c b/lib/log/log.c index 52c771fb6b8..b80725a5cf2 100644 --- a/lib/log/log.c +++ b/lib/log/log.c @@ -93,7 +93,7 @@ rte_log_get_stream(void) * of stderr, even if the application closes and * reopens it. */ - return default_log_stream ? : stderr; + return default_log_stream != NULL ? default_log_stream : stderr; } return f; } diff --git a/lib/log/rte_log.h b/lib/log/rte_log.h index bdefff2a593..f7a8405de96 100644 --- a/lib/log/rte_log.h +++ b/lib/log/rte_log.h @@ -277,11 +277,7 @@ void rte_log_dump(FILE *f); * - Negative on error. */ int rte_log(uint32_t level, uint32_t logtype, const char *format, ...) 
-#ifdef __GNUC__ -#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2)) __rte_cold -#endif -#endif __rte_format_printf(3, 4); /** diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c index 4d337fca8dc..7a7a9bf6db6 100644 --- a/lib/mempool/rte_mempool.c +++ b/lib/mempool/rte_mempool.c @@ -914,6 +914,22 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, STAILQ_INIT(&mp->elt_list); STAILQ_INIT(&mp->mem_list); + /* + * Since we have 4 combinations of the SP/SC/MP/MC examine the flags to + * set the correct index into the table of ops structs. + */ + if ((flags & RTE_MEMPOOL_F_SP_PUT) && (flags & RTE_MEMPOOL_F_SC_GET)) + ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL); + else if (flags & RTE_MEMPOOL_F_SP_PUT) + ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL); + else if (flags & RTE_MEMPOOL_F_SC_GET) + ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL); + else + ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); + + if (ret) + goto exit_unlock; + /* * local_cache pointer is set even if cache_size is zero. * The local_cache points to just past the elt_pa[] array. @@ -954,7 +970,6 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size, rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, int socket_id, unsigned flags) { - int ret; struct rte_mempool *mp; mp = rte_mempool_create_empty(name, n, elt_size, cache_size, @@ -962,22 +977,6 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size, if (mp == NULL) return NULL; - /* - * Since we have 4 combinations of the SP/SC/MP/MC examine the flags to - * set the correct index into the table of ops structs. - */ - if ((flags & RTE_MEMPOOL_F_SP_PUT) && (flags & RTE_MEMPOOL_F_SC_GET)) - ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL); - else if (flags & RTE_MEMPOOL_F_SP_PUT) - ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL); - else if (flags & RTE_MEMPOOL_F_SC_GET) - ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL); - else - ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); - - if (ret) - goto fail; - /* call the mempool priv initializer */ if (mp_init) mp_init(mp, mp_init_arg); diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h index a05b25d5b9e..f70bf36080f 100644 --- a/lib/mempool/rte_mempool.h +++ b/lib/mempool/rte_mempool.h @@ -78,6 +78,7 @@ struct rte_mempool_debug_stats { uint64_t get_fail_objs; /**< Objects that failed to be allocated. */ uint64_t get_success_blks; /**< Successful allocation number of contiguous blocks. */ uint64_t get_fail_blks; /**< Failed allocation number of contiguous blocks. 
*/ + RTE_CACHE_GUARD; } __rte_cache_aligned; #endif diff --git a/lib/meson.build b/lib/meson.build index 92cbd6bb660..6c143ce5a60 100644 --- a/lib/meson.build +++ b/lib/meson.build @@ -33,8 +33,10 @@ libraries = [ 'compressdev', 'cryptodev', 'distributor', + 'dmadev', # eventdev depends on this 'efd', 'eventdev', + 'dispatcher', # dispatcher depends on eventdev 'gpudev', 'gro', 'gso', @@ -48,7 +50,6 @@ libraries = [ 'rawdev', 'regexdev', 'mldev', - 'dmadev', 'rib', 'reorder', 'sched', @@ -66,36 +67,78 @@ libraries = [ 'node', ] +if is_ms_compiler + libraries = [ + 'log', + 'kvargs', + 'telemetry', + ] +endif + optional_libs = [ + 'bbdev', 'bitratestats', + 'bpf', 'cfgfile', + 'compressdev', + 'cryptodev', + 'dispatcher', + 'distributor', + 'dmadev', + 'efd', + 'eventdev', 'gpudev', 'graph', 'gro', 'gso', + 'ip_frag', + 'ipsec', 'jobstats', 'latencystats', + 'member', 'metrics', + 'mldev', 'node', + 'pcapng', + 'pdcp', 'pdump', 'pipeline', 'port', 'power', + 'rawdev', + 'regexdev', + 'reorder', + 'sched', + 'security', 'table', 'vhost', ] -disabled_libs = [] -opt_disabled_libs = run_command(list_dir_globs, get_option('disable_libs'), - check: true).stdout().split() -foreach l:opt_disabled_libs +always_enable = [] +foreach l:libraries if not optional_libs.contains(l) - warning('Cannot disable mandatory library "@0@"'.format(l)) + always_enable += l + endif +endforeach + +enable_deprecated_libs = [] +foreach l:run_command(list_dir_globs, get_option('enable_deprecated_libs'), + check: true).stdout().split() + if not dpdk_libs_deprecated.contains(l) continue endif - disabled_libs += l + enable_deprecated_libs += l endforeach +disable_libs = run_command(list_dir_globs, get_option('disable_libs'), check: true).stdout().split() + +enable_libs = run_command(list_dir_globs, get_option('enable_libs'), check: true).stdout().split() +require_libs = true +if enable_libs.length() == 0 + require_libs = false + enable_libs += optional_libs +endif +enable_libs += always_enable default_cflags = machine_args default_cflags += ['-DALLOW_EXPERIMENTAL_API'] @@ -129,17 +172,31 @@ foreach l:libraries deps += ['eal'] endif - if disabled_libs.contains(l) - build = false - reason = 'explicitly disabled via build config' - if dpdk_libs_deprecated.contains(l) - reason += ' (deprecated lib)' - endif - else - if dpdk_libs_deprecated.contains(l) + if dpdk_libs_deprecated.contains(l) + if not enable_deprecated_libs.contains(l) + build = false + reason = 'not in enabled deprecated libraries build config' + else warning('Enabling deprecated library, "@0@"'.format(l)) endif + elif not enable_libs.contains(l) + build = false + reason = 'not in enabled libraries build config' + elif disable_libs.contains(l) + if always_enable.contains(l) + warning('Cannot disable mandatory library "@0@"'.format(l)) + else + build = false + reason = 'explicitly disabled via build config' + endif + endif + + if build subdir(l) + if not build and require_libs + error('Cannot build explicitly requested lib "@0@".\n'.format(name) + +'\tReason: ' + reason) + endif endif if name != l warning('Library name, "@0@", and directory name, "@1@", do not match'.format(name, l)) @@ -159,6 +216,10 @@ foreach l:libraries endif message('Disabling @1@ [@2@]: missing internal dependency "@0@"' .format(d, name, 'lib/' + l)) + if require_libs + error('Cannot build explicitly requested lib "@0@".\n'.format(name) + + '\tPlease add missing dependency "@0@" to "enable_libs" option'.format(d)) + endif else shared_deps += [get_variable('shared_rte_' + d)] static_deps += 
[get_variable('static_rte_' + d)] @@ -230,9 +291,16 @@ foreach l:libraries output: '@0@_exports.def'.format(libname)) lk_deps += [def_file] - lk_args = ['-Wl,/def:' + def_file.full_path()] - if meson.version().version_compare('<0.54.0') - lk_args += ['-Wl,/implib:lib\\librte_' + l + '.dll.a'] + if is_ms_compiler + lk_args = ['/def:' + def_file.full_path()] + if meson.version().version_compare('<0.54.0') + lk_args += ['/implib:lib\\librte_' + l + '.dll.a'] + endif + else + lk_args = ['-Wl,/def:' + def_file.full_path()] + if meson.version().version_compare('<0.54.0') + lk_args += ['-Wl,/implib:lib\\librte_' + l + '.dll.a'] + endif endif else if is_windows diff --git a/lib/mldev/meson.build b/lib/mldev/meson.build index 5769b0640a1..0079ccd2052 100644 --- a/lib/mldev/meson.build +++ b/lib/mldev/meson.build @@ -35,7 +35,7 @@ driver_sdk_headers += files( 'mldev_utils.h', ) -deps += ['mempool'] +deps += ['mempool', 'mbuf'] if get_option('buildtype').contains('debug') cflags += [ '-DRTE_LIBRTE_ML_DEV_DEBUG' ] diff --git a/lib/mldev/mldev_utils.c b/lib/mldev/mldev_utils.c index d2442b123b8..ccd2c39ca89 100644 --- a/lib/mldev/mldev_utils.c +++ b/lib/mldev/mldev_utils.c @@ -86,33 +86,3 @@ rte_ml_io_type_to_str(enum rte_ml_io_type type, char *str, int len) rte_strlcpy(str, "invalid", len); } } - -void -rte_ml_io_format_to_str(enum rte_ml_io_format format, char *str, int len) -{ - switch (format) { - case RTE_ML_IO_FORMAT_NCHW: - rte_strlcpy(str, "NCHW", len); - break; - case RTE_ML_IO_FORMAT_NHWC: - rte_strlcpy(str, "NHWC", len); - break; - case RTE_ML_IO_FORMAT_CHWN: - rte_strlcpy(str, "CHWN", len); - break; - case RTE_ML_IO_FORMAT_3D: - rte_strlcpy(str, "3D", len); - break; - case RTE_ML_IO_FORMAT_2D: - rte_strlcpy(str, "Matrix", len); - break; - case RTE_ML_IO_FORMAT_1D: - rte_strlcpy(str, "Vector", len); - break; - case RTE_ML_IO_FORMAT_SCALAR: - rte_strlcpy(str, "Scalar", len); - break; - default: - rte_strlcpy(str, "invalid", len); - } -} diff --git a/lib/mldev/mldev_utils.h b/lib/mldev/mldev_utils.h index 5bc80204532..220afb42f0d 100644 --- a/lib/mldev/mldev_utils.h +++ b/lib/mldev/mldev_utils.h @@ -52,22 +52,6 @@ __rte_internal void rte_ml_io_type_to_str(enum rte_ml_io_type type, char *str, int len); -/** - * @internal - * - * Get the name of an ML IO format. - * - * @param[in] type - * Enumeration of ML IO format. - * @param[in] str - * Address of character array. - * @param[in] len - * Length of character array. 
- */ -__rte_internal -void -rte_ml_io_format_to_str(enum rte_ml_io_format format, char *str, int len); - /** * @internal * diff --git a/lib/mldev/mldev_utils_scalar.c b/lib/mldev/mldev_utils_scalar.c index 92be5daee80..4d6cb880240 100644 --- a/lib/mldev/mldev_utils_scalar.c +++ b/lib/mldev/mldev_utils_scalar.c @@ -413,7 +413,7 @@ __float16_to_float32_scalar_rtx(uint16_t f16) if (f16_m == 0) { /* zero signed */ f32_e = 0; } else { /* subnormal numbers */ - clz = __builtin_clz((uint32_t)f16_m) - sizeof(uint32_t) * 8 + FP16_LSB_E; + clz = rte_clz32((uint32_t)f16_m) - sizeof(uint32_t) * 8 + FP16_LSB_E; e_16 = (int)f16_e - clz; f32_e = FP32_BIAS_E + e_16 - FP16_BIAS_E; diff --git a/lib/mldev/rte_mldev.c b/lib/mldev/rte_mldev.c index 0d8ccd32124..cc5f2e0cc63 100644 --- a/lib/mldev/rte_mldev.c +++ b/lib/mldev/rte_mldev.c @@ -692,46 +692,8 @@ rte_ml_model_params_update(int16_t dev_id, uint16_t model_id, void *buffer) } int -rte_ml_io_input_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches, - uint64_t *input_qsize, uint64_t *input_dsize) -{ - struct rte_ml_dev *dev; - - if (!rte_ml_dev_is_valid_dev(dev_id)) { - RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id); - return -EINVAL; - } - - dev = rte_ml_dev_pmd_get_dev(dev_id); - if (*dev->dev_ops->io_input_size_get == NULL) - return -ENOTSUP; - - return (*dev->dev_ops->io_input_size_get)(dev, model_id, nb_batches, input_qsize, - input_dsize); -} - -int -rte_ml_io_output_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches, - uint64_t *output_qsize, uint64_t *output_dsize) -{ - struct rte_ml_dev *dev; - - if (!rte_ml_dev_is_valid_dev(dev_id)) { - RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id); - return -EINVAL; - } - - dev = rte_ml_dev_pmd_get_dev(dev_id); - if (*dev->dev_ops->io_output_size_get == NULL) - return -ENOTSUP; - - return (*dev->dev_ops->io_output_size_get)(dev, model_id, nb_batches, output_qsize, - output_dsize); -} - -int -rte_ml_io_quantize(int16_t dev_id, uint16_t model_id, uint16_t nb_batches, void *dbuffer, - void *qbuffer) +rte_ml_io_quantize(int16_t dev_id, uint16_t model_id, struct rte_ml_buff_seg **dbuffer, + struct rte_ml_buff_seg **qbuffer) { struct rte_ml_dev *dev; @@ -754,12 +716,12 @@ rte_ml_io_quantize(int16_t dev_id, uint16_t model_id, uint16_t nb_batches, void return -EINVAL; } - return (*dev->dev_ops->io_quantize)(dev, model_id, nb_batches, dbuffer, qbuffer); + return (*dev->dev_ops->io_quantize)(dev, model_id, dbuffer, qbuffer); } int -rte_ml_io_dequantize(int16_t dev_id, uint16_t model_id, uint16_t nb_batches, void *qbuffer, - void *dbuffer) +rte_ml_io_dequantize(int16_t dev_id, uint16_t model_id, struct rte_ml_buff_seg **qbuffer, + struct rte_ml_buff_seg **dbuffer) { struct rte_ml_dev *dev; @@ -782,7 +744,7 @@ rte_ml_io_dequantize(int16_t dev_id, uint16_t model_id, uint16_t nb_batches, voi return -EINVAL; } - return (*dev->dev_ops->io_dequantize)(dev, model_id, nb_batches, qbuffer, dbuffer); + return (*dev->dev_ops->io_dequantize)(dev, model_id, qbuffer, dbuffer); } /** Initialise rte_ml_op mempool element */ diff --git a/lib/mldev/rte_mldev.h b/lib/mldev/rte_mldev.h index fc3525c1ab5..63b2670bb04 100644 --- a/lib/mldev/rte_mldev.h +++ b/lib/mldev/rte_mldev.h @@ -228,12 +228,14 @@ struct rte_ml_dev_info { /**< Maximum allowed number of descriptors for queue pair by the device. * @see struct rte_ml_dev_qp_conf::nb_desc */ + uint16_t max_io; + /**< Maximum number of inputs/outputs supported per model. 
*/ uint16_t max_segments; /**< Maximum number of scatter-gather entries supported by the device. * @see struct rte_ml_buff_seg struct rte_ml_buff_seg::next */ - uint16_t min_align_size; - /**< Minimum alignment size of IO buffers used by the device. */ + uint16_t align_size; + /**< Alignment size of IO buffers used by the device. */ }; /** @@ -429,10 +431,28 @@ struct rte_ml_op { /**< Reserved for future use. */ struct rte_mempool *mempool; /**< Pool from which operation is allocated. */ - struct rte_ml_buff_seg input; - /**< Input buffer to hold the inference data. */ - struct rte_ml_buff_seg output; - /**< Output buffer to hold the inference output by the driver. */ + struct rte_ml_buff_seg **input; + /**< Array of buffer segments to hold the inference input data. + * + * When the model supports IO layout RTE_ML_IO_LAYOUT_PACKED, size of + * the array is 1. + * + * When the model supports IO layout RTE_ML_IO_LAYOUT_SPLIT, size of + * the array is rte_ml_model_info::nb_inputs. + * + * @see struct rte_ml_dev_info::io_layout + */ + struct rte_ml_buff_seg **output; + /**< Array of buffer segments to hold the inference output data. + * + * When the model supports IO layout RTE_ML_IO_LAYOUT_PACKED, size of + * the array is 1. + * + * When the model supports IO layout RTE_ML_IO_LAYOUT_SPLIT, size of + * the array is rte_ml_model_info::nb_outputs. + * + * @see struct rte_ml_dev_info::io_layout + */ union { uint64_t user_u64; /**< User data as uint64_t.*/ @@ -863,60 +883,55 @@ enum rte_ml_io_type { /**< 16-bit brain floating point number. */ }; -/** - * Input and output format. This is used to represent the encoding type of multi-dimensional - * used by ML models. - */ -enum rte_ml_io_format { - RTE_ML_IO_FORMAT_NCHW = 1, - /**< Batch size (N) x channels (C) x height (H) x width (W) */ - RTE_ML_IO_FORMAT_NHWC, - /**< Batch size (N) x height (H) x width (W) x channels (C) */ - RTE_ML_IO_FORMAT_CHWN, - /**< Channels (C) x height (H) x width (W) x batch size (N) */ - RTE_ML_IO_FORMAT_3D, - /**< Format to represent a 3 dimensional data */ - RTE_ML_IO_FORMAT_2D, - /**< Format to represent matrix data */ - RTE_ML_IO_FORMAT_1D, - /**< Format to represent vector data */ - RTE_ML_IO_FORMAT_SCALAR, - /**< Format to represent scalar data */ +/** ML I/O buffer layout */ +enum rte_ml_io_layout { + RTE_ML_IO_LAYOUT_PACKED, + /**< All inputs for the model should packed in a single buffer with + * no padding between individual inputs. The buffer is expected to + * be aligned to rte_ml_dev_info::align_size. + * + * When I/O segmentation is supported by the device, the packed + * data can be split into multiple segments. In this case, each + * segment is expected to be aligned to rte_ml_dev_info::align_size + * + * Same applies to output. + * + * @see struct rte_ml_dev_info::max_segments + */ + RTE_ML_IO_LAYOUT_SPLIT + /**< Each input for the model should be stored as separate buffers + * and each input should be aligned to rte_ml_dev_info::align_size. + * + * When I/O segmentation is supported, each input can be split into + * multiple segments. In this case, each segment is expected to be + * aligned to rte_ml_dev_info::align_size + * + * Same applies to output. + * + * @see struct rte_ml_dev_info::max_segments + */ }; /** - * Input and output shape. This structure represents the encoding format and dimensions - * of the tensor or vector. - * - * The data can be a 4D / 3D tensor, matrix, vector or a scalar. Number of dimensions used - * for the data would depend on the format. Unused dimensions to be set to 1. 
- */ -struct rte_ml_io_shape { - enum rte_ml_io_format format; - /**< Format of the data */ - uint32_t w; - /**< First dimension */ - uint32_t x; - /**< Second dimension */ - uint32_t y; - /**< Third dimension */ - uint32_t z; - /**< Fourth dimension */ -}; - -/** Input and output data information structure + * Input and output data information structure * * Specifies the type and shape of input and output data. */ struct rte_ml_io_info { char name[RTE_ML_STR_MAX]; /**< Name of data */ - struct rte_ml_io_shape shape; - /**< Shape of data */ - enum rte_ml_io_type qtype; - /**< Type of quantized data */ - enum rte_ml_io_type dtype; - /**< Type of de-quantized data */ + uint32_t nb_dims; + /**< Number of dimensions in shape */ + uint32_t *shape; + /**< Shape of the tensor for rte_ml_model_info::min_batches of the model. */ + enum rte_ml_io_type type; + /**< Type of data + * @see enum rte_ml_io_type + */ + uint64_t nb_elements; + /** Number of elements in tensor */ + uint64_t size; + /** Size of tensor in bytes */ }; /** Model information structure */ @@ -929,8 +944,16 @@ struct rte_ml_model_info { /**< Model ID */ uint16_t device_id; /**< Device ID */ - uint16_t batch_size; - /**< Maximum number of batches that the model can process simultaneously */ + enum rte_ml_io_layout io_layout; + /**< I/O buffer layout for the model */ + uint16_t min_batches; + /**< Minimum number of batches that the model can process + * in one inference request + */ + uint16_t max_batches; + /**< Maximum number of batches that the model can process + * in one inference request + */ uint32_t nb_inputs; /**< Number of inputs */ const struct rte_ml_io_info *input_info; @@ -985,66 +1008,6 @@ rte_ml_model_params_update(int16_t dev_id, uint16_t model_id, void *buffer); /* IO operations */ -/** - * Get size of quantized and dequantized input buffers. - * - * Calculate the size of buffers required for quantized and dequantized input data. - * This API would return the buffer sizes for the number of batches provided and would - * consider the alignment requirements as per the PMD. Input sizes computed by this API can - * be used by the application to allocate buffers. - * - * @param[in] dev_id - * The identifier of the device. - * @param[in] model_id - * Identifier for the model created - * @param[in] nb_batches - * Number of batches of input to be processed in a single inference job - * @param[out] input_qsize - * Quantized input size pointer. - * NULL value is allowed, in which case input_qsize is not calculated by the driver. - * @param[out] input_dsize - * Dequantized input size pointer. - * NULL value is allowed, in which case input_dsize is not calculated by the driver. - * - * @return - * - Returns 0 on success - * - Returns negative value on failure - */ -__rte_experimental -int -rte_ml_io_input_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches, - uint64_t *input_qsize, uint64_t *input_dsize); - -/** - * Get size of quantized and dequantized output buffers. - * - * Calculate the size of buffers required for quantized and dequantized output data. - * This API would return the buffer sizes for the number of batches provided and would consider - * the alignment requirements as per the PMD. Output sizes computed by this API can be used by the - * application to allocate buffers. - * - * @param[in] dev_id - * The identifier of the device. 
- * @param[in] model_id - * Identifier for the model created - * @param[in] nb_batches - * Number of batches of input to be processed in a single inference job - * @param[out] output_qsize - * Quantized output size pointer. - * NULL value is allowed, in which case output_qsize is not calculated by the driver. - * @param[out] output_dsize - * Dequantized output size pointer. - * NULL value is allowed, in which case output_dsize is not calculated by the driver. - * - * @return - * - Returns 0 on success - * - Returns negative value on failure - */ -__rte_experimental -int -rte_ml_io_output_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches, - uint64_t *output_qsize, uint64_t *output_dsize); - /** * Quantize input data. * @@ -1056,8 +1019,6 @@ rte_ml_io_output_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches * The identifier of the device. * @param[in] model_id * Identifier for the model - * @param[in] nb_batches - * Number of batches in the dequantized input buffer * @param[in] dbuffer * Address of dequantized input data * @param[in] qbuffer @@ -1069,8 +1030,8 @@ rte_ml_io_output_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches */ __rte_experimental int -rte_ml_io_quantize(int16_t dev_id, uint16_t model_id, uint16_t nb_batches, void *dbuffer, - void *qbuffer); +rte_ml_io_quantize(int16_t dev_id, uint16_t model_id, struct rte_ml_buff_seg **dbuffer, + struct rte_ml_buff_seg **qbuffer); /** * Dequantize output data. @@ -1082,8 +1043,6 @@ rte_ml_io_quantize(int16_t dev_id, uint16_t model_id, uint16_t nb_batches, void * The identifier of the device. * @param[in] model_id * Identifier for the model - * @param[in] nb_batches - * Number of batches in the dequantized output buffer * @param[in] qbuffer * Address of quantized output data * @param[in] dbuffer @@ -1095,8 +1054,8 @@ rte_ml_io_quantize(int16_t dev_id, uint16_t model_id, uint16_t nb_batches, void */ __rte_experimental int -rte_ml_io_dequantize(int16_t dev_id, uint16_t model_id, uint16_t nb_batches, void *qbuffer, - void *dbuffer); +rte_ml_io_dequantize(int16_t dev_id, uint16_t model_id, struct rte_ml_buff_seg **qbuffer, + struct rte_ml_buff_seg **dbuffer); /* ML op pool operations */ diff --git a/lib/mldev/rte_mldev_core.h b/lib/mldev/rte_mldev_core.h index 78b8b7633dd..2279b1dcecb 100644 --- a/lib/mldev/rte_mldev_core.h +++ b/lib/mldev/rte_mldev_core.h @@ -466,54 +466,6 @@ typedef int (*mldev_model_info_get_t)(struct rte_ml_dev *dev, uint16_t model_id, */ typedef int (*mldev_model_params_update_t)(struct rte_ml_dev *dev, uint16_t model_id, void *buffer); -/** - * @internal - * - * Get size of input buffers. - * - * @param dev - * ML device pointer. - * @param model_id - * Model ID to use. - * @param nb_batches - * Number of batches. - * @param input_qsize - * Size of quantized input. - * @param input_dsize - * Size of dequantized input. - * - * @return - * - 0 on success. - * - <0, error on failure. - */ -typedef int (*mldev_io_input_size_get_t)(struct rte_ml_dev *dev, uint16_t model_id, - uint32_t nb_batches, uint64_t *input_qsize, - uint64_t *input_dsize); - -/** - * @internal - * - * Get size of output buffers. - * - * @param dev - * ML device pointer. - * @param model_id - * Model ID to use. - * @param nb_batches - * Number of batches. - * @param output_qsize - * Size of quantized output. - * @param output_dsize - * Size of dequantized output. - * - * @return - * - 0 on success. - * - <0, error on failure. 
- */ -typedef int (*mldev_io_output_size_get_t)(struct rte_ml_dev *dev, uint16_t model_id, - uint32_t nb_batches, uint64_t *output_qsize, - uint64_t *output_dsize); - /** * @internal * @@ -523,8 +475,6 @@ typedef int (*mldev_io_output_size_get_t)(struct rte_ml_dev *dev, uint16_t model * ML device pointer. * @param model_id * Model ID to use. - * @param nb_batches - * Number of batches. * @param dbuffer * Pointer t de-quantized data buffer. * @param qbuffer @@ -534,8 +484,9 @@ typedef int (*mldev_io_output_size_get_t)(struct rte_ml_dev *dev, uint16_t model * - 0 on success. * - <0, error on failure. */ -typedef int (*mldev_io_quantize_t)(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_batches, - void *dbuffer, void *qbuffer); +typedef int (*mldev_io_quantize_t)(struct rte_ml_dev *dev, uint16_t model_id, + struct rte_ml_buff_seg **dbuffer, + struct rte_ml_buff_seg **qbuffer); /** * @internal @@ -546,8 +497,6 @@ typedef int (*mldev_io_quantize_t)(struct rte_ml_dev *dev, uint16_t model_id, ui * ML device pointer. * @param model_id * Model ID to use. - * @param nb_batches - * Number of batches. * @param qbuffer * Pointer t de-quantized data buffer. * @param dbuffer @@ -557,8 +506,9 @@ typedef int (*mldev_io_quantize_t)(struct rte_ml_dev *dev, uint16_t model_id, ui * - 0 on success. * - <0, error on failure. */ -typedef int (*mldev_io_dequantize_t)(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_batches, - void *qbuffer, void *dbuffer); +typedef int (*mldev_io_dequantize_t)(struct rte_ml_dev *dev, uint16_t model_id, + struct rte_ml_buff_seg **qbuffer, + struct rte_ml_buff_seg **dbuffer); /** * @internal @@ -629,12 +579,6 @@ struct rte_ml_dev_ops { /** Update model params. */ mldev_model_params_update_t model_params_update; - /** Get input buffer size. */ - mldev_io_input_size_get_t io_input_size_get; - - /** Get output buffer size. */ - mldev_io_output_size_get_t io_output_size_get; - /** Quantize data */ mldev_io_quantize_t io_quantize; diff --git a/lib/mldev/version.map b/lib/mldev/version.map index 0706b565be6..99841db6aa9 100644 --- a/lib/mldev/version.map +++ b/lib/mldev/version.map @@ -23,8 +23,6 @@ EXPERIMENTAL { rte_ml_dev_xstats_reset; rte_ml_enqueue_burst; rte_ml_io_dequantize; - rte_ml_io_input_size_get; - rte_ml_io_output_size_get; rte_ml_io_quantize; rte_ml_model_info_get; rte_ml_model_load; @@ -51,7 +49,6 @@ INTERNAL { rte_ml_io_type_size_get; rte_ml_io_type_to_str; - rte_ml_io_format_to_str; rte_ml_io_float32_to_int8; rte_ml_io_int8_to_float32; rte_ml_io_float32_to_uint8; diff --git a/lib/net/meson.build b/lib/net/meson.build index b1bc27bad5f..0b691389495 100644 --- a/lib/net/meson.build +++ b/lib/net/meson.build @@ -5,6 +5,8 @@ headers = files( 'rte_ip.h', 'rte_tcp.h', 'rte_udp.h', + 'rte_tls.h', + 'rte_dtls.h', 'rte_esp.h', 'rte_sctp.h', 'rte_icmp.h', diff --git a/lib/net/rte_dtls.h b/lib/net/rte_dtls.h new file mode 100644 index 00000000000..4f541df89c0 --- /dev/null +++ b/lib/net/rte_dtls.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2023 Marvell. + */ + +#ifndef RTE_DTLS_H +#define RTE_DTLS_H + +/** + * @file + * + * Datagram transport layer security (DTLS) related defines. + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define RTE_DTLS_TYPE_INVALID 0 /**< Invalid DTLS message type. */ +#define RTE_DTLS_TYPE_CHANGE_CIPHER_SPEC 20 /**< Change cipher spec message. */ +#define RTE_DTLS_TYPE_ALERT 21 /**< Alert message. */ +#define RTE_DTLS_TYPE_HANDSHAKE 22 /**< Handshake message for DTLS. 
*/ +#define RTE_DTLS_TYPE_APPDATA 23 /**< DTLS application data message. */ +#define RTE_DTLS_TYPE_HEARTBEAT 24 /**< DTLS 1.3 heartbeat message. */ +#define RTE_DTLS_TYPE_CIPHERTEXT_WITH_CID 25 /**< DTLS 1.3 ciphertext with CID message. */ +#define RTE_DTLS_TYPE_ACK 26 /**< DTLS 1.3 ACK message. */ +#define RTE_DTLS_TYPE_MAX 255 /**< Maximum value as DTLS content type. */ + +#define RTE_DTLS_VERSION_1_2 0xFEFD /**< DTLS 1.2 version. 1's complement of 1.2. */ +#define RTE_DTLS_VERSION_1_3 0xFEFC /**< DTLS 1.3 version. 1's complement of 1.3. */ + +/** + * DTLS Header + */ +__extension__ +struct rte_dtls_hdr { + /** Content type of DTLS packet. Defined as RTE_DTLS_TYPE_*. */ + uint8_t type; + /** DTLS Version defined as RTE_DTLS_VERSION*. */ + rte_be16_t version; +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + /** The sequence number for the DTLS record. */ + uint64_t sequence_number : 48; + /** A counter value that is incremented on every cipher state change. */ + uint64_t epoch : 16; +#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN + /** A counter value that is incremented on every cipher state change. */ + uint64_t epoch : 16; + /** The sequence number for the DTLS record. */ + uint64_t sequence_number : 48; +#endif + /** The length (in bytes) of the following DTLS packet. */ + rte_be16_t length; +} __rte_packed; + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_DTLS_H */ diff --git a/lib/net/rte_ether.c b/lib/net/rte_ether.c index 66d9a9d0699..f59c20289d3 100644 --- a/lib/net/rte_ether.c +++ b/lib/net/rte_ether.c @@ -38,7 +38,8 @@ static int8_t get_xdigit(char ch) } /* Convert 00:11:22:33:44:55 to ethernet address */ -static bool get_ether_addr6(const char *s0, struct rte_ether_addr *ea) +static bool get_ether_addr6(const char *s0, struct rte_ether_addr *ea, + const char sep) { const char *s = s0; int i; @@ -48,25 +49,29 @@ static bool get_ether_addr6(const char *s0, struct rte_ether_addr *ea) x = get_xdigit(*s++); if (x < 0) - return false; + return false; /* not a hex digit */ - ea->addr_bytes[i] = x << 4; - x = get_xdigit(*s++); - if (x < 0) - return false; - ea->addr_bytes[i] |= x; + ea->addr_bytes[i] = x; + if (*s != sep && *s != '\0') { + x = get_xdigit(*s++); + if (x < 0) + return false; /* not a hex digit */ + ea->addr_bytes[i] <<= 4; + ea->addr_bytes[i] |= x; + } if (i < RTE_ETHER_ADDR_LEN - 1 && - *s++ != ':') - return false; + *s++ != sep) + return false; /* premature end of string */ } - /* return true if at end of string */ + /* return true if no trailing characters */ return *s == '\0'; } /* Convert 0011:2233:4455 to ethernet address */ -static bool get_ether_addr3(const char *s, struct rte_ether_addr *ea) +static bool get_ether_addr3(const char *s, struct rte_ether_addr *ea, + const char sep) { int i, j; @@ -78,14 +83,15 @@ static bool get_ether_addr3(const char *s, struct rte_ether_addr *ea) x = get_xdigit(*s++); if (x < 0) - return false; + return false; /* not a hex digit */ w = (w << 4) | x; } + ea->addr_bytes[i] = w >> 8; ea->addr_bytes[i + 1] = w & 0xff; if (i < RTE_ETHER_ADDR_LEN - 2 && - *s++ != ':') + *s++ != sep) return false; } @@ -93,17 +99,56 @@ static bool get_ether_addr3(const char *s, struct rte_ether_addr *ea) } /* - * Like ether_aton_r but can handle either - * XX:XX:XX:XX:XX:XX or XXXX:XXXX:XXXX - * and is more restrictive. 
+ * Scan input to see if separated by dash, colon or period + * Returns separator and number of matches + * If separators are mixed will return + */ +static unsigned int get_ether_sep(const char *s, char *sep) +{ + static const char separators[] = "-:."; + unsigned int count = 0; + const char *cp; + + cp = strpbrk(s, separators); + if (cp == NULL) + return 0; /* no separator found */ + + *sep = *cp; /* return the separator */ + do { + ++count; + /* find next instance of separator */ + cp = strchr(cp + 1, *sep); + } while (cp != NULL); + + return count; +} + +/* + * Be liberal in accepting a wide variety of notational formats + * for MAC address including: + * - Linux format six groups of hexadecimal digits separated by colon + * - Windows format six groups separated by hyphen + * - two groups hexadecimal digits */ int rte_ether_unformat_addr(const char *s, struct rte_ether_addr *ea) { - if (get_ether_addr6(s, ea)) - return 0; - if (get_ether_addr3(s, ea)) - return 0; + unsigned int count; + char sep = '\0'; + + count = get_ether_sep(s, &sep); + switch (count) { + case 5: /* i.e 01:23:45:67:89:AB */ + if (get_ether_addr6(s, ea, sep)) + return 0; + break; + case 2: /* i.e 0123.4567.89AB */ + if (get_ether_addr3(s, ea, sep)) + return 0; + break; + default: + break; + } rte_errno = EINVAL; return -1; diff --git a/lib/net/rte_ether.h b/lib/net/rte_ether.h index b35c72c7b0e..ce073ea818a 100644 --- a/lib/net/rte_ether.h +++ b/lib/net/rte_ether.h @@ -254,9 +254,15 @@ rte_ether_format_addr(char *buf, uint16_t size, * * @param str * A pointer to buffer contains the formatted MAC address. - * The supported formats are: - * XX:XX:XX:XX:XX:XX or XXXX:XXXX:XXXX + * Accepts either byte or word format separated by colon, + * hyphen or period. + * + * The example formats are: + * XX:XX:XX:XX:XX:XX - Canonical form + * XX-XX-XX-XX-XX-XX - Windows and IEEE 802 + * XXXX.XXXX.XXXX - Cisco * where XX is a hex digit: 0-9, a-f, or A-F. + * In the byte format, leading zeros are optional. * @param eth_addr * A pointer to a ether_addr structure. * @return diff --git a/lib/net/rte_tls.h b/lib/net/rte_tls.h new file mode 100644 index 00000000000..2eb3c6d453d --- /dev/null +++ b/lib/net/rte_tls.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2023 Marvell. + */ + +#ifndef RTE_TLS_H +#define RTE_TLS_H + +/** + * @file + * + * Transport layer security (TLS) related defines. + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define RTE_TLS_TYPE_INVALID 0 /**< Invalid TLS message type. */ +#define RTE_TLS_TYPE_CHANGE_CIPHER_SPEC 20 /**< Change cipher spec message. */ +#define RTE_TLS_TYPE_ALERT 21 /**< Alert message. */ +#define RTE_TLS_TYPE_HANDSHAKE 22 /**< Handshake message for TLS. */ +#define RTE_TLS_TYPE_APPDATA 23 /**< TLS application data message. */ +#define RTE_TLS_TYPE_HEARTBEAT 24 /**< TLS 1.3 heartbeat message. */ +#define RTE_TLS_TYPE_MAX 255 /**< Maximum value as TLS content type. */ + +#define RTE_TLS_VERSION_1_2 0x0303 /**< TLS 1.2 version. */ +#define RTE_TLS_VERSION_1_3 0x0304 /**< TLS 1.3 version. */ + +/** + * TLS Header + */ +__extension__ +struct rte_tls_hdr { + /** Content type of TLS packet. Defined as RTE_TLS_TYPE_*. */ + uint8_t type; + /** TLS Version defined as RTE_TLS_VERSION*. */ + rte_be16_t version; + /** The length (in bytes) of the following TLS packet. 
*/ + rte_be16_t length; +} __rte_packed; + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_TLS_H */ diff --git a/lib/node/ethdev_rx.c b/lib/node/ethdev_rx.c index d1310349919..3e8fac1df46 100644 --- a/lib/node/ethdev_rx.c +++ b/lib/node/ethdev_rx.c @@ -215,6 +215,7 @@ static struct rte_node_register ethdev_rx_node_base = { .next_nodes = { [ETHDEV_RX_NEXT_PKT_CLS] = "pkt_cls", [ETHDEV_RX_NEXT_IP4_LOOKUP] = "ip4_lookup", + [ETHDEV_RX_NEXT_IP4_REASSEMBLY] = "ip4_reassembly", }, }; diff --git a/lib/node/ethdev_rx_priv.h b/lib/node/ethdev_rx_priv.h index 7f24cf962e5..574a76c2a62 100644 --- a/lib/node/ethdev_rx_priv.h +++ b/lib/node/ethdev_rx_priv.h @@ -39,6 +39,7 @@ struct ethdev_rx_node_elem { enum ethdev_rx_next_nodes { ETHDEV_RX_NEXT_IP4_LOOKUP, ETHDEV_RX_NEXT_PKT_CLS, + ETHDEV_RX_NEXT_IP4_REASSEMBLY, ETHDEV_RX_NEXT_MAX, }; diff --git a/lib/node/ip4_local.c b/lib/node/ip4_local.c new file mode 100644 index 00000000000..288f9399ff3 --- /dev/null +++ b/lib/node/ip4_local.c @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2023 Marvell International Ltd. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_node_ip4_api.h" + +#include "node_private.h" + +static uint16_t +ip4_local_node_process_scalar(struct rte_graph *graph, struct rte_node *node, + void **objs, uint16_t nb_objs) +{ + void **to_next, **from; + uint16_t last_spec = 0; + rte_edge_t next_index; + struct rte_mbuf *mbuf; + uint16_t held = 0; + uint32_t l4; + int i; + + /* Speculative next */ + next_index = RTE_NODE_IP4_LOCAL_NEXT_UDP4_INPUT; + + from = objs; + to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs); + for (i = 0; i < nb_objs; i++) { + uint16_t next; + + mbuf = (struct rte_mbuf *)objs[i]; + l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK; + + next = (l4 == RTE_PTYPE_L4_UDP) + ? next_index + : RTE_NODE_IP4_LOCAL_NEXT_PKT_DROP; + + if (unlikely(next_index != next)) { + /* Copy things successfully speculated till now */ + rte_memcpy(to_next, from, last_spec * sizeof(from[0])); + from += last_spec; + to_next += last_spec; + held += last_spec; + last_spec = 0; + + rte_node_enqueue_x1(graph, node, next, from[0]); + from += 1; + } else { + last_spec += 1; + } + } + /* !!! Home run !!! 
*/ + if (likely(last_spec == nb_objs)) { + rte_node_next_stream_move(graph, node, next_index); + return nb_objs; + } + held += last_spec; + rte_memcpy(to_next, from, last_spec * sizeof(from[0])); + rte_node_next_stream_put(graph, node, next_index, held); + + return nb_objs; +} + +static struct rte_node_register ip4_local_node = { + .process = ip4_local_node_process_scalar, + .name = "ip4_local", + + .nb_edges = RTE_NODE_IP4_LOCAL_NEXT_PKT_DROP + 1, + .next_nodes = { + [RTE_NODE_IP4_LOCAL_NEXT_UDP4_INPUT] = "udp4_input", + [RTE_NODE_IP4_LOCAL_NEXT_PKT_DROP] = "pkt_drop", + }, +}; + +RTE_NODE_REGISTER(ip4_local_node); diff --git a/lib/node/ip4_lookup.c b/lib/node/ip4_lookup.c index 8bce03d7db9..0dbfde64fe8 100644 --- a/lib/node/ip4_lookup.c +++ b/lib/node/ip4_lookup.c @@ -225,8 +225,9 @@ static struct rte_node_register ip4_lookup_node = { .init = ip4_lookup_node_init, - .nb_edges = RTE_NODE_IP4_LOOKUP_NEXT_MAX, + .nb_edges = RTE_NODE_IP4_LOOKUP_NEXT_PKT_DROP + 1, .next_nodes = { + [RTE_NODE_IP4_LOOKUP_NEXT_IP4_LOCAL] = "ip4_local", [RTE_NODE_IP4_LOOKUP_NEXT_REWRITE] = "ip4_rewrite", [RTE_NODE_IP4_LOOKUP_NEXT_PKT_DROP] = "pkt_drop", }, diff --git a/lib/node/ip4_reassembly.c b/lib/node/ip4_reassembly.c new file mode 100644 index 00000000000..04823cc5968 --- /dev/null +++ b/lib/node/ip4_reassembly.c @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2023 Marvell. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_node_ip4_api.h" + +#include "ip4_reassembly_priv.h" +#include "node_private.h" + +struct ip4_reassembly_elem { + struct ip4_reassembly_elem *next; + struct ip4_reassembly_ctx ctx; + rte_node_t node_id; +}; + +/* IP4 reassembly global data struct */ +struct ip4_reassembly_node_main { + struct ip4_reassembly_elem *head; +}; + +typedef struct ip4_reassembly_ctx ip4_reassembly_ctx_t; +typedef struct ip4_reassembly_elem ip4_reassembly_elem_t; + +static struct ip4_reassembly_node_main ip4_reassembly_main; + +static uint16_t +ip4_reassembly_node_process(struct rte_graph *graph, struct rte_node *node, void **objs, + uint16_t nb_objs) +{ +#define PREFETCH_OFFSET 4 + struct rte_mbuf *mbuf, *mbuf_out; + struct rte_ip_frag_death_row *dr; + struct ip4_reassembly_ctx *ctx; + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ip_frag_tbl *tbl; + void **to_next, **to_free; + uint16_t idx = 0; + int i; + + ctx = (struct ip4_reassembly_ctx *)node->ctx; + + /* Get core specific reassembly tbl */ + tbl = ctx->tbl; + dr = ctx->dr; + + for (i = 0; i < PREFETCH_OFFSET && i < nb_objs; i++) { + rte_prefetch0(rte_pktmbuf_mtod_offset((struct rte_mbuf *)objs[i], void *, + sizeof(struct rte_ether_hdr))); + } + + to_next = node->objs; + for (i = 0; i < nb_objs - PREFETCH_OFFSET; i++) { +#if RTE_GRAPH_BURST_SIZE > 64 + /* Prefetch next-next mbufs */ + if (likely(i + 8 < nb_objs)) + rte_prefetch0(objs[i + 8]); +#endif + rte_prefetch0(rte_pktmbuf_mtod_offset((struct rte_mbuf *)objs[i + PREFETCH_OFFSET], + void *, sizeof(struct rte_ether_hdr))); + mbuf = (struct rte_mbuf *)objs[i]; + + ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *, + sizeof(struct rte_ether_hdr)); + if (rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)) { + /* prepare mbuf: setup l2_len/l3_len. 
*/ + mbuf->l2_len = sizeof(struct rte_ether_hdr); + mbuf->l3_len = sizeof(struct rte_ipv4_hdr); + + mbuf_out = rte_ipv4_frag_reassemble_packet(tbl, dr, mbuf, rte_rdtsc(), + ipv4_hdr); + } else { + mbuf_out = mbuf; + } + + if (mbuf_out) + to_next[idx++] = (void *)mbuf_out; + } + + for (; i < nb_objs; i++) { + mbuf = (struct rte_mbuf *)objs[i]; + + ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *, + sizeof(struct rte_ether_hdr)); + if (rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)) { + /* prepare mbuf: setup l2_len/l3_len. */ + mbuf->l2_len = sizeof(struct rte_ether_hdr); + mbuf->l3_len = sizeof(struct rte_ipv4_hdr); + + mbuf_out = rte_ipv4_frag_reassemble_packet(tbl, dr, mbuf, rte_rdtsc(), + ipv4_hdr); + } else { + mbuf_out = mbuf; + } + + if (mbuf_out) + to_next[idx++] = (void *)mbuf_out; + } + node->idx = idx; + rte_node_next_stream_move(graph, node, 1); + if (dr->cnt) { + to_free = rte_node_next_stream_get(graph, node, + RTE_NODE_IP4_REASSEMBLY_NEXT_PKT_DROP, dr->cnt); + rte_memcpy(to_free, dr->row, dr->cnt * sizeof(to_free[0])); + rte_node_next_stream_put(graph, node, RTE_NODE_IP4_REASSEMBLY_NEXT_PKT_DROP, + dr->cnt); + idx += dr->cnt; + dr->cnt = 0; + } + + return idx; +} + +int +rte_node_ip4_reassembly_configure(struct rte_node_ip4_reassembly_cfg *cfg, uint16_t cnt) +{ + ip4_reassembly_elem_t *elem; + int i; + + for (i = 0; i < cnt; i++) { + elem = malloc(sizeof(ip4_reassembly_elem_t)); + if (elem == NULL) + return -ENOMEM; + elem->ctx.dr = cfg[i].dr; + elem->ctx.tbl = cfg[i].tbl; + elem->node_id = cfg[i].node_id; + elem->next = ip4_reassembly_main.head; + ip4_reassembly_main.head = elem; + } + + return 0; +} + +static int +ip4_reassembly_node_init(const struct rte_graph *graph, struct rte_node *node) +{ + ip4_reassembly_ctx_t *ctx = (ip4_reassembly_ctx_t *)node->ctx; + ip4_reassembly_elem_t *elem = ip4_reassembly_main.head; + + RTE_SET_USED(graph); + while (elem) { + if (elem->node_id == node->id) { + /* Update node specific context */ + memcpy(ctx, &elem->ctx, sizeof(ip4_reassembly_ctx_t)); + break; + } + elem = elem->next; + } + + return 0; +} + +static struct rte_node_register ip4_reassembly_node = { + .process = ip4_reassembly_node_process, + .name = "ip4_reassembly", + + .init = ip4_reassembly_node_init, + + .nb_edges = RTE_NODE_IP4_REASSEMBLY_NEXT_PKT_DROP + 1, + .next_nodes = { + [RTE_NODE_IP4_REASSEMBLY_NEXT_PKT_DROP] = "pkt_drop", + }, +}; + +struct rte_node_register * +ip4_reassembly_node_get(void) +{ + return &ip4_reassembly_node; +} + +RTE_NODE_REGISTER(ip4_reassembly_node); diff --git a/lib/node/ip4_reassembly_priv.h b/lib/node/ip4_reassembly_priv.h new file mode 100644 index 00000000000..1fa70274bad --- /dev/null +++ b/lib/node/ip4_reassembly_priv.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2023 Marvell International Ltd. + */ + +#ifndef __INCLUDE_IP4_REASSEMBLY_PRIV_H__ +#define __INCLUDE_IP4_REASSEMBLY_PRIV_H__ + +/** + * @internal + * + * Ip4_reassembly context structure. + */ +struct ip4_reassembly_ctx { + struct rte_ip_frag_tbl *tbl; + struct rte_ip_frag_death_row *dr; +}; + +/** + * @internal + * + * Get the IP4 reassembly node + * + * @return + * Pointer to the IP4 reassembly node. 
+ */ +struct rte_node_register *ip4_reassembly_node_get(void); + +#endif /* __INCLUDE_IP4_REASSEMBLY_PRIV_H__ */ diff --git a/lib/node/ip6_lookup.c b/lib/node/ip6_lookup.c index 646e466551c..6f56eb5ec5d 100644 --- a/lib/node/ip6_lookup.c +++ b/lib/node/ip6_lookup.c @@ -362,7 +362,7 @@ static struct rte_node_register ip6_lookup_node = { .init = ip6_lookup_node_init, - .nb_edges = RTE_NODE_IP6_LOOKUP_NEXT_MAX, + .nb_edges = RTE_NODE_IP6_LOOKUP_NEXT_PKT_DROP + 1, .next_nodes = { [RTE_NODE_IP6_LOOKUP_NEXT_REWRITE] = "ip6_rewrite", [RTE_NODE_IP6_LOOKUP_NEXT_PKT_DROP] = "pkt_drop", diff --git a/lib/node/meson.build b/lib/node/meson.build index 2fa7c1c8f35..0bed97a96c3 100644 --- a/lib/node/meson.build +++ b/lib/node/meson.build @@ -11,7 +11,9 @@ sources = files( 'ethdev_ctrl.c', 'ethdev_rx.c', 'ethdev_tx.c', + 'ip4_local.c', 'ip4_lookup.c', + 'ip4_reassembly.c', 'ip4_rewrite.c', 'ip6_lookup.c', 'ip6_rewrite.c', @@ -21,13 +23,15 @@ sources = files( 'null.c', 'pkt_cls.c', 'pkt_drop.c', + 'udp4_input.c', ) headers = files( 'rte_node_eth_api.h', 'rte_node_ip4_api.h', 'rte_node_ip6_api.h', + 'rte_node_udp4_input_api.h', ) # Strict-aliasing rules are violated by uint8_t[] to context size casts. cflags += '-fno-strict-aliasing' -deps += ['graph', 'mbuf', 'lpm', 'ethdev', 'mempool', 'cryptodev'] +deps += ['graph', 'mbuf', 'lpm', 'ethdev', 'mempool', 'cryptodev', 'ip_frag'] diff --git a/lib/node/rte_node_ip4_api.h b/lib/node/rte_node_ip4_api.h index 3397da0ae8c..a84dbb3b549 100644 --- a/lib/node/rte_node_ip4_api.h +++ b/lib/node/rte_node_ip4_api.h @@ -22,18 +22,51 @@ extern "C" { #include #include +#include + /** * IP4 lookup next nodes. */ enum rte_node_ip4_lookup_next { RTE_NODE_IP4_LOOKUP_NEXT_REWRITE, /**< Rewrite node. */ + RTE_NODE_IP4_LOOKUP_NEXT_IP4_LOCAL, + /** IP Local node. */ RTE_NODE_IP4_LOOKUP_NEXT_PKT_DROP, - /**< Packet drop node. */ - RTE_NODE_IP4_LOOKUP_NEXT_MAX, /**< Number of next nodes of lookup node. */ }; +/** + * IP4 Local next nodes. + */ +enum rte_node_ip4_local_next { + RTE_NODE_IP4_LOCAL_NEXT_UDP4_INPUT, + /**< ip4 Local node. */ + RTE_NODE_IP4_LOCAL_NEXT_PKT_DROP, + /**< Packet drop node. */ +}; + +/** + * IP4 reassembly next nodes. + */ +enum rte_node_ip4_reassembly_next { + RTE_NODE_IP4_REASSEMBLY_NEXT_PKT_DROP, + /**< Packet drop node. */ +}; + +/** + * Reassembly configure structure. + * @see rte_node_ip4_reassembly_configure + */ +struct rte_node_ip4_reassembly_cfg { + struct rte_ip_frag_tbl *tbl; + /**< Reassembly fragmentation table. */ + struct rte_ip_frag_death_row *dr; + /**< Reassembly deathrow table. */ + rte_node_t node_id; + /**< Node identifier to configure. */ +}; + /** * Add ipv4 route to lookup table. * @@ -72,6 +105,20 @@ __rte_experimental int rte_node_ip4_rewrite_add(uint16_t next_hop, uint8_t *rewrite_data, uint8_t rewrite_len, uint16_t dst_port); +/** + * Add reassembly node configuration data. + * + * @param cfg + * Pointer to the configuration structure. + * @param cnt + * Number of configuration structures passed. + * + * @return + * 0 on success, negative otherwise. + */ +__rte_experimental +int rte_node_ip4_reassembly_configure(struct rte_node_ip4_reassembly_cfg *cfg, uint16_t cnt); + #ifdef __cplusplus } #endif diff --git a/lib/node/rte_node_ip6_api.h b/lib/node/rte_node_ip6_api.h index f3b5a1002a0..a538dc2ea70 100644 --- a/lib/node/rte_node_ip6_api.h +++ b/lib/node/rte_node_ip6_api.h @@ -30,8 +30,6 @@ enum rte_node_ip6_lookup_next { /**< Rewrite node. */ RTE_NODE_IP6_LOOKUP_NEXT_PKT_DROP, /**< Packet drop node. 
*/ - RTE_NODE_IP6_LOOKUP_NEXT_MAX, - /**< Number of next nodes of lookup node. */ }; /** diff --git a/lib/node/rte_node_udp4_input_api.h b/lib/node/rte_node_udp4_input_api.h new file mode 100644 index 00000000000..c873acbbe05 --- /dev/null +++ b/lib/node/rte_node_udp4_input_api.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2023 Marvell International Ltd. + */ + +#ifndef __INCLUDE_RTE_NODE_UDP4_INPUT_API_H__ +#define __INCLUDE_RTE_NODE_UDP4_INPUT_API_H__ + +/** + * @file rte_node_udp4_input_api.h + * + * @warning + * @b EXPERIMENTAL: + * All functions in this file may be changed or removed without prior notice. + * + * This API allows to control path functions of udp4_* nodes + * like udp4_input. + * + */ +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#include "rte_graph.h" +/** + * UDP4 lookup next nodes. + */ +enum rte_node_udp4_input_next { + RTE_NODE_UDP4_INPUT_NEXT_PKT_DROP, + /**< Packet drop node. */ +}; + +/** + * Add usr node to receive udp4 frames. + * + * @param usr_node + * Node registered by user to receive data. + */ +__rte_experimental +int rte_node_udp4_usr_node_add(const char *usr_node); + +/** + * Add udpv4 dst_port to lookup table. + * + * @param dst_port + * Dst Port of packet to be added for consumption. + * @param next_node + * Next node packet to be added for consumption. + * @return + * 0 on success, negative otherwise. + */ +__rte_experimental +int rte_node_udp4_dst_port_add(uint32_t dst_port, rte_edge_t next_node); + +#ifdef __cplusplus +} +#endif + +#endif /* __INCLUDE_RTE_NODE_UDP4_API_H__ */ diff --git a/lib/node/udp4_input.c b/lib/node/udp4_input.c new file mode 100644 index 00000000000..bbcb2c70e79 --- /dev/null +++ b/lib/node/udp4_input.c @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2023 Marvell International Ltd. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_node_udp4_input_api.h" + +#include "node_private.h" + +#define UDP4_INPUT_HASH_TBL_SIZE 1024 + +#define UDP4_INPUT_NODE_HASH(ctx) \ + (((struct udp4_input_node_ctx *)ctx)->hash) + +#define UDP4_INPUT_NODE_NEXT_INDEX(ctx) \ + (((struct udp4_input_node_ctx *)ctx)->next_index) + + +/* UDP4 input global data struct */ +struct udp4_input_node_main { + struct rte_hash *hash_tbl[RTE_MAX_NUMA_NODES]; +}; + +static struct udp4_input_node_main udp4_input_nm; + +struct udp4_input_node_ctx { + /* Socket's Hash table */ + struct rte_hash *hash; + /* Cached next index */ + uint16_t next_index; +}; + +struct flow_key { + uint32_t prt_dst; +}; + +static struct rte_hash_parameters udp4_params = { + .entries = UDP4_INPUT_HASH_TBL_SIZE, + .key_len = sizeof(uint32_t), + .hash_func = rte_jhash, + .hash_func_init_val = 0, + .socket_id = 0, +}; + +int +rte_node_udp4_dst_port_add(uint32_t dst_port, rte_edge_t next_node) +{ + uint8_t socket; + int rc; + + for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) { + if (!udp4_input_nm.hash_tbl[socket]) + continue; + + rc = rte_hash_add_key_data(udp4_input_nm.hash_tbl[socket], + &dst_port, (void *)(uintptr_t)next_node); + if (rc < 0) { + node_err("udp4_lookup", "Failed to add key for sock %u, rc=%d", + socket, rc); + return rc; + } + } + return 0; +} + +int +rte_node_udp4_usr_node_add(const char *usr_node) +{ + const char *next_nodes = usr_node; + rte_node_t udp4_input_node_id, count; + + udp4_input_node_id = rte_node_from_name("udp4_input"); + count = rte_node_edge_update(udp4_input_node_id, RTE_EDGE_ID_INVALID, + &next_nodes, 1); + if (count == 0) { + node_dbg("udp4_input", "Adding usr node as edge to udp4_input failed"); + return count; + } + count = rte_node_edge_count(udp4_input_node_id) - 1; + return count; +} + +static int +setup_udp4_dstprt_hash(struct udp4_input_node_main *nm, int socket) +{ + struct rte_hash_parameters *hash_udp4 = &udp4_params; + char s[RTE_HASH_NAMESIZE]; + + /* One Hash table per socket */ + if (nm->hash_tbl[socket]) + return 0; + + /* create Hash table */ + snprintf(s, sizeof(s), "UDP4_INPUT_HASH_%d", socket); + hash_udp4->name = s; + hash_udp4->socket_id = socket; + nm->hash_tbl[socket] = rte_hash_create(hash_udp4); + if (nm->hash_tbl[socket] == NULL) + return -rte_errno; + + return 0; +} + +static int +udp4_input_node_init(const struct rte_graph *graph, struct rte_node *node) +{ + uint16_t socket, lcore_id; + static uint8_t init_once; + int rc; + + RTE_SET_USED(graph); + RTE_BUILD_BUG_ON(sizeof(struct udp4_input_node_ctx) > RTE_NODE_CTX_SZ); + + if (!init_once) { + + /* Setup HASH tables for all sockets */ + RTE_LCORE_FOREACH(lcore_id) + { + socket = rte_lcore_to_socket_id(lcore_id); + rc = setup_udp4_dstprt_hash(&udp4_input_nm, socket); + if (rc) { + node_err("udp4_lookup", + "Failed to setup hash tbl for sock %u, rc=%d", + socket, rc); + return rc; + } + } + init_once = 1; + } + + UDP4_INPUT_NODE_HASH(node->ctx) = udp4_input_nm.hash_tbl[graph->socket]; + + node_dbg("udp4_input", "Initialized udp4_input node"); + return 0; +} + +static uint16_t +udp4_input_node_process_scalar(struct rte_graph *graph, struct rte_node *node, + void **objs, uint16_t nb_objs) +{ + struct rte_hash *hash_tbl_handle = UDP4_INPUT_NODE_HASH(node->ctx); + rte_edge_t next_index, udplookup_node; + struct rte_udp_hdr *pkt_udp_hdr; + uint16_t last_spec = 0; + void **to_next, **from; + struct rte_mbuf *mbuf; + uint16_t 
held = 0; + uint16_t next = 0; + int i, rc; + + /* Speculative next */ + next_index = UDP4_INPUT_NODE_NEXT_INDEX(node->ctx); + + from = objs; + + to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs); + for (i = 0; i < nb_objs; i++) { + struct flow_key key_port; + + mbuf = (struct rte_mbuf *)objs[i]; + pkt_udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *, + sizeof(struct rte_ether_hdr) + + sizeof(struct rte_ipv4_hdr)); + + key_port.prt_dst = rte_cpu_to_be_16(pkt_udp_hdr->dst_port); + rc = rte_hash_lookup_data(hash_tbl_handle, + &key_port.prt_dst, + (void **)&udplookup_node); + next = (rc < 0) ? RTE_NODE_UDP4_INPUT_NEXT_PKT_DROP + : udplookup_node; + + if (unlikely(next_index != next)) { + /* Copy things successfully speculated till now */ + rte_memcpy(to_next, from, last_spec * sizeof(from[0])); + from += last_spec; + to_next += last_spec; + held += last_spec; + last_spec = 0; + + rte_node_enqueue_x1(graph, node, next, from[0]); + from += 1; + } else { + last_spec += 1; + } + } + /* !!! Home run !!! */ + if (likely(last_spec == nb_objs)) { + rte_node_next_stream_move(graph, node, next_index); + return nb_objs; + } + held += last_spec; + rte_memcpy(to_next, from, last_spec * sizeof(from[0])); + rte_node_next_stream_put(graph, node, next_index, held); + /* Save the last next used */ + UDP4_INPUT_NODE_NEXT_INDEX(node->ctx) = next; + + return nb_objs; +} + +static struct rte_node_register udp4_input_node = { + .process = udp4_input_node_process_scalar, + .name = "udp4_input", + + .init = udp4_input_node_init, + + .nb_edges = RTE_NODE_UDP4_INPUT_NEXT_PKT_DROP + 1, + .next_nodes = { + [RTE_NODE_UDP4_INPUT_NEXT_PKT_DROP] = "pkt_drop", + }, +}; + +RTE_NODE_REGISTER(udp4_input_node); diff --git a/lib/node/version.map b/lib/node/version.map index 40df308bfe8..3855569228e 100644 --- a/lib/node/version.map +++ b/lib/node/version.map @@ -1,11 +1,20 @@ EXPERIMENTAL { global: + # added in 20.05 rte_node_eth_config; rte_node_ip4_route_add; rte_node_ip4_rewrite_add; + rte_node_logtype; + + # added in 23.07 rte_node_ip6_rewrite_add; rte_node_ip6_route_add; - rte_node_logtype; + + # added in 23.11 + rte_node_ip4_reassembly_configure; + rte_node_udp4_dst_port_add; + rte_node_udp4_usr_node_add; + local: *; }; diff --git a/lib/pci/rte_pci.h b/lib/pci/rte_pci.h index aab761b9181..69e932d9101 100644 --- a/lib/pci/rte_pci.h +++ b/lib/pci/rte_pci.h @@ -28,15 +28,69 @@ extern "C" { #define RTE_PCI_CFG_SPACE_SIZE 256 #define RTE_PCI_CFG_SPACE_EXP_SIZE 4096 +#define RTE_PCI_STD_HEADER_SIZEOF 64 + +/* Standard register offsets in the PCI configuration space */ #define RTE_PCI_VENDOR_ID 0x00 /* 16 bits */ #define RTE_PCI_DEVICE_ID 0x02 /* 16 bits */ #define RTE_PCI_COMMAND 0x04 /* 16 bits */ - -/* PCI Command Register */ -#define RTE_PCI_COMMAND_MASTER 0x4 /* Bus Master Enable */ - -/* PCI Express capability registers */ -#define RTE_PCI_EXP_DEVCTL 8 /* Device Control */ +#define RTE_PCI_STATUS 0x06 /* 16 bits */ +#define RTE_PCI_BASE_ADDRESS_0 0x10 /* 32 bits */ +#define RTE_PCI_CAPABILITY_LIST 0x34 /* 32 bits */ + +/* PCI Command Register (RTE_PCI_COMMAND) */ +#define RTE_PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */ +#define RTE_PCI_COMMAND_MASTER 0x4 /* Bus Master Enable */ +#define RTE_PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */ + +/* PCI Status Register (RTE_PCI_STATUS) */ +#define RTE_PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */ + +/* Base addresses (RTE_PCI_BASE_ADDRESS_*) */ +#define RTE_PCI_BASE_ADDRESS_SPACE_IO 0x01 + +/* Capability registers 
(RTE_PCI_CAPABILITY_LIST) */ +#define RTE_PCI_CAP_ID_PM 0x01 /* Power Management */ +#define RTE_PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */ +#define RTE_PCI_CAP_ID_VNDR 0x09 /* Vendor-Specific */ +#define RTE_PCI_CAP_ID_EXP 0x10 /* PCI Express */ +#define RTE_PCI_CAP_ID_MSIX 0x11 /* MSI-X */ +#define RTE_PCI_CAP_SIZEOF 4 +#define RTE_PCI_CAP_NEXT 1 + +/* Power Management Registers (RTE_PCI_CAP_ID_PM) */ +#define RTE_PCI_PM_CTRL 4 /* PM control and status register */ +#define RTE_PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ +#define RTE_PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */ +#define RTE_PCI_PM_CTRL_PME_STATUS 0x8000 /* PME pin status */ + +/* PCI Express capability registers (RTE_PCI_CAP_ID_EXP) */ +#define RTE_PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#define RTE_PCI_EXP_DEVCTL 0x08 /* Device Control */ +#define RTE_PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */ +#define RTE_PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */ +#define RTE_PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ +#define RTE_PCI_EXP_DEVSTA 0x0a /* Device Status */ +#define RTE_PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */ +#define RTE_PCI_EXP_LNKCTL 0x10 /* Link Control */ +#define RTE_PCI_EXP_LNKSTA 0x12 /* Link Status */ +#define RTE_PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#define RTE_PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ +#define RTE_PCI_EXP_SLTCTL 0x18 /* Slot Control */ +#define RTE_PCI_EXP_RTCTL 0x1c /* Root Control */ +#define RTE_PCI_EXP_DEVCTL2 0x28 /* Device Control 2 */ +#define RTE_PCI_EXP_LNKCTL2 0x30 /* Link Control 2 */ +#define RTE_PCI_EXP_SLTCTL2 0x38 /* Slot Control 2 */ + +/* MSI-X registers (RTE_PCI_CAP_ID_MSIX) */ +#define RTE_PCI_MSIX_FLAGS 2 /* Message Control */ +#define RTE_PCI_MSIX_FLAGS_QSIZE 0x07ff /* Table size */ +#define RTE_PCI_MSIX_FLAGS_MASKALL 0x4000 /* Mask all vectors for this function */ +#define RTE_PCI_MSIX_FLAGS_ENABLE 0x8000 /* MSI-X enable */ + +#define RTE_PCI_MSIX_TABLE 4 /* Table offset */ +#define RTE_PCI_MSIX_TABLE_BIR 0x00000007 /* BAR index */ +#define RTE_PCI_MSIX_TABLE_OFFSET 0xfffffff8 /* Offset into specified BAR */ /* Extended Capabilities (PCI-X 2.0 and Express) */ #define RTE_PCI_EXT_CAP_ID(header) (header & 0x0000ffff) @@ -44,9 +98,25 @@ extern "C" { #define RTE_PCI_EXT_CAP_ID_ERR 0x01 /* Advanced Error Reporting */ #define RTE_PCI_EXT_CAP_ID_DSN 0x03 /* Device Serial Number */ -#define RTE_PCI_EXT_CAP_ID_SRIOV 0x10 /* SR-IOV*/ - -/* Single Root I/O Virtualization */ +#define RTE_PCI_EXT_CAP_ID_ACS 0x0d /* Access Control Services */ +#define RTE_PCI_EXT_CAP_ID_SRIOV 0x10 /* SR-IOV */ +#define RTE_PCI_EXT_CAP_ID_PRI 0x13 /* Page Request Interface */ + +/* Advanced Error Reporting (RTE_PCI_EXT_CAP_ID_ERR) */ +#define RTE_PCI_ERR_UNCOR_STATUS 0x04 /* Uncorrectable Error Status */ +#define RTE_PCI_ERR_COR_STATUS 0x10 /* Correctable Error Status */ +#define RTE_PCI_ERR_ROOT_STATUS 0x30 + +/* Access Control Service (RTE_PCI_EXT_CAP_ID_ACS) */ +#define RTE_PCI_ACS_CAP 0x04 /* ACS Capability Register */ +#define RTE_PCI_ACS_CTRL 0x06 /* ACS Control Register */ +#define RTE_PCI_ACS_SV 0x0001 /* Source Validation */ +#define RTE_PCI_ACS_RR 0x0004 /* P2P Request Redirect */ +#define RTE_PCI_ACS_CR 0x0008 /* P2P Completion Redirect */ +#define RTE_PCI_ACS_UF 0x0010 /* Upstream Forwarding */ +#define RTE_PCI_ACS_EC 0x0020 /* P2P Egress Control */ + +/* Single Root I/O Virtualization (RTE_PCI_EXT_CAP_ID_SRIOV) */ #define RTE_PCI_SRIOV_CAP 
0x04 /* SR-IOV Capabilities */ #define RTE_PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */ #define RTE_PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */ @@ -58,6 +128,11 @@ extern "C" { #define RTE_PCI_SRIOV_VF_DID 0x1a /* VF Device ID */ #define RTE_PCI_SRIOV_SUP_PGSIZE 0x1c /* Supported Page Sizes */ +/* Page Request Interface (RTE_PCI_EXT_CAP_ID_PRI) */ +#define RTE_PCI_PRI_CTRL 0x04 /* PRI control register */ +#define RTE_PCI_PRI_CTRL_ENABLE 0x0001 /* Enable */ +#define RTE_PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */ + /** Formatting string for PCI device identifier: Ex: 0000:00:01.0 */ #define PCI_PRI_FMT "%.4" PRIx32 ":%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8 #define PCI_PRI_STR_SIZE sizeof("XXXXXXXX:XX:XX.X") diff --git a/lib/pdcp/pdcp_entity.h b/lib/pdcp/pdcp_entity.h index 9f74b5d0e5e..4fc6342a5ce 100644 --- a/lib/pdcp/pdcp_entity.h +++ b/lib/pdcp/pdcp_entity.h @@ -171,6 +171,8 @@ struct entity_priv { uint8_t hdr_sz; /** PDCP AAD size. For AES-CMAC, additional message is prepended for the operation. */ uint8_t aad_sz; + /** PDCP cipher skip size. When enabled, SDAP header needs to be skipped from ciphering */ + uint8_t cipher_skip_sz; /** Device ID of the device to be used for offload. */ uint8_t dev_id; }; diff --git a/lib/pdcp/pdcp_process.c b/lib/pdcp/pdcp_process.c index c2f28d4d778..9b9b881124b 100644 --- a/lib/pdcp/pdcp_process.c +++ b/lib/pdcp/pdcp_process.c @@ -391,7 +391,7 @@ pdcp_pre_process_uplane_sn_12_ul(const struct rte_pdcp_entity *entity, struct rt uint8_t *mac_i; int i; - const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz; + const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz + en_priv->cipher_skip_sz; const int is_null_auth = en_priv->flags.is_null_auth; nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop, @@ -477,7 +477,7 @@ pdcp_pre_process_uplane_sn_18_ul(const struct rte_pdcp_entity *entity, struct rt uint8_t *mac_i; int i; - const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz; + const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz + en_priv->cipher_skip_sz; const int is_null_auth = en_priv->flags.is_null_auth; nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop, @@ -540,7 +540,7 @@ pdcp_pre_process_cplane_sn_12_ul(const struct rte_pdcp_entity *entity, struct rt int i; const uint8_t hdr_sz = en_priv->hdr_sz; - const uint8_t data_offset = hdr_sz + en_priv->aad_sz; + const uint8_t data_offset = hdr_sz + en_priv->aad_sz + en_priv->cipher_skip_sz; const int is_null_auth = en_priv->flags.is_null_auth; nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop, @@ -658,7 +658,7 @@ pdcp_pre_process_uplane_sn_12_dl_flags(const struct rte_pdcp_entity *entity, uint32_t count; int i; - const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz; + const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz + en_priv->cipher_skip_sz; nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop, num); @@ -727,7 +727,7 @@ pdcp_pre_process_uplane_sn_18_dl_flags(const struct rte_pdcp_entity *entity, uint32_t count; int i; - const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz; + const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz + en_priv->cipher_skip_sz; nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop, num); @@ -795,7 +795,7 @@ pdcp_pre_process_cplane_sn_12_dl(const struct rte_pdcp_entity *entity, struct rt int32_t rsn; int i; - const uint8_t data_offset = 
en_priv->hdr_sz + en_priv->aad_sz; + const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz + en_priv->cipher_skip_sz; nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop, num); @@ -1202,6 +1202,16 @@ pdcp_entity_priv_populate(struct entity_priv *en_priv, const struct rte_pdcp_ent else en_priv->aad_sz = 0; + /** + * cipher_skip_sz + * + * When SDAP protocol is enabled for the PDCP entity, skip the SDAP header from ciphering. + */ + if (conf->pdcp_xfrm.sdap_enabled) + en_priv->cipher_skip_sz = 1; + else + en_priv->cipher_skip_sz = 0; + return 0; } diff --git a/lib/pipeline/rte_pipeline.c b/lib/pipeline/rte_pipeline.c index 1fa9f9c47ed..436cf549534 100644 --- a/lib/pipeline/rte_pipeline.c +++ b/lib/pipeline/rte_pipeline.c @@ -17,7 +17,7 @@ #ifdef RTE_PIPELINE_STATS_COLLECT #define RTE_PIPELINE_STATS_AH_DROP_WRITE(p, mask) \ - ({ (p)->n_pkts_ah_drop = __builtin_popcountll(mask); }) + ({ (p)->n_pkts_ah_drop = rte_popcount64(mask); }) #define RTE_PIPELINE_STATS_AH_DROP_READ(p, counter) \ ({ (counter) += (p)->n_pkts_ah_drop; (p)->n_pkts_ah_drop = 0; }) @@ -29,7 +29,7 @@ ({ \ uint64_t mask = (p)->action_mask0[RTE_PIPELINE_ACTION_DROP]; \ mask ^= (p)->pkts_drop_mask; \ - (counter) += __builtin_popcountll(mask); \ + (counter) += rte_popcount64(mask); \ }) #else @@ -133,7 +133,7 @@ rte_mask_get_next(uint64_t mask, uint32_t pos) { uint64_t mask_rot = (mask << ((63 - pos) & 0x3F)) | (mask >> ((pos + 1) & 0x3F)); - return (__builtin_ctzll(mask_rot) - (63 - pos)) & 0x3F; + return (rte_ctz64(mask_rot) - (63 - pos)) & 0x3F; } static inline uint32_t @@ -141,7 +141,7 @@ rte_mask_get_prev(uint64_t mask, uint32_t pos) { uint64_t mask_rot = (mask >> (pos & 0x3F)) | (mask << ((64 - pos) & 0x3F)); - return ((63 - __builtin_clzll(mask_rot)) + pos) & 0x3F; + return ((63 - rte_clz64(mask_rot)) + pos) & 0x3F; } static void @@ -1082,7 +1082,7 @@ rte_pipeline_compute_masks(struct rte_pipeline *p, uint64_t pkts_mask) p->action_mask1[RTE_PIPELINE_ACTION_TABLE] = 0; if ((pkts_mask & (pkts_mask + 1)) == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i; for (i = 0; i < n_pkts; i++) { @@ -1136,7 +1136,7 @@ rte_pipeline_action_handler_port(struct rte_pipeline *p, uint64_t pkts_mask) p->pkts_mask = pkts_mask; if ((pkts_mask & (pkts_mask + 1)) == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i; for (i = 0; i < n_pkts; i++) { @@ -1209,7 +1209,7 @@ rte_pipeline_action_handler_port_meta(struct rte_pipeline *p, p->pkts_mask = pkts_mask; if ((pkts_mask & (pkts_mask + 1)) == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i; for (i = 0; i < n_pkts; i++) { @@ -1282,7 +1282,7 @@ static inline void rte_pipeline_action_handler_drop(struct rte_pipeline *p, uint64_t pkts_mask) { if ((pkts_mask & (pkts_mask + 1)) == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i; for (i = 0; i < n_pkts; i++) diff --git a/lib/pipeline/rte_swx_ipsec.c b/lib/pipeline/rte_swx_ipsec.c index 6c217ee7977..28576c2a481 100644 --- a/lib/pipeline/rte_swx_ipsec.c +++ b/lib/pipeline/rte_swx_ipsec.c @@ -1555,7 +1555,6 @@ ipsec_xform_get(struct rte_swx_ipsec_sa_params *p, ipsec_xform->options.ip_csum_enable = 0; ipsec_xform->options.l4_csum_enable = 0; ipsec_xform->options.ip_reassembly_en = 0; - ipsec_xform->options.reserved_opts = 0; ipsec_xform->direction = 
p->encrypt ? RTE_SECURITY_IPSEC_SA_DIR_EGRESS : diff --git a/lib/pipeline/rte_swx_pipeline_internal.h b/lib/pipeline/rte_swx_pipeline_internal.h index 2f24e1a1c52..a67b6e965de 100644 --- a/lib/pipeline/rte_swx_pipeline_internal.h +++ b/lib/pipeline/rte_swx_pipeline_internal.h @@ -8,6 +8,7 @@ #include #include +#include #include #include #include diff --git a/lib/pipeline/rte_table_action.c b/lib/pipeline/rte_table_action.c index 59d71ee50db..dfdbc66b087 100644 --- a/lib/pipeline/rte_table_action.c +++ b/lib/pipeline/rte_table_action.c @@ -386,7 +386,7 @@ tm_apply(struct tm_data *data, /* Apply */ data->queue_id = p->subport_id << - (__builtin_ctz(cfg->n_pipes_per_subport) + 4) | + (rte_ctz32(cfg->n_pipes_per_subport) + 4) | p->pipe_id << 4; return 0; @@ -429,7 +429,7 @@ static int encap_cfg_check(struct rte_table_action_encap_config *encap) { if ((encap->encap_mask == 0) || - (__builtin_popcountll(encap->encap_mask) != 1)) + (rte_popcount64(encap->encap_mask) != 1)) return -ENOTSUP; return 0; @@ -3364,7 +3364,7 @@ ah(struct rte_pipeline *p, time = rte_rdtsc(); if ((pkts_mask & (pkts_mask + 1)) == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i; for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) { @@ -3392,7 +3392,7 @@ ah(struct rte_pipeline *p, } } else for ( ; pkts_mask; ) { - uint32_t pos = __builtin_ctzll(pkts_mask); + uint32_t pos = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pos; uint64_t drop_mask; diff --git a/lib/port/rte_port_ethdev.c b/lib/port/rte_port_ethdev.c index 0da7890261e..e6bb7ee480e 100644 --- a/lib/port/rte_port_ethdev.c +++ b/lib/port/rte_port_ethdev.c @@ -206,7 +206,7 @@ rte_port_ethdev_writer_tx_bulk(void *port, ((pkts_mask & bsz_mask) ^ bsz_mask); if (expr == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t n_pkts_ok; if (tx_buf_count) @@ -224,7 +224,7 @@ rte_port_ethdev_writer_tx_bulk(void *port, } } else { for ( ; pkts_mask; ) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; @@ -413,7 +413,7 @@ rte_port_ethdev_writer_nodrop_tx_bulk(void *port, ((pkts_mask & bsz_mask) ^ bsz_mask); if (expr == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t n_pkts_ok; if (tx_buf_count) @@ -437,7 +437,7 @@ rte_port_ethdev_writer_nodrop_tx_bulk(void *port, send_burst_nodrop(p); } else { for ( ; pkts_mask; ) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; diff --git a/lib/port/rte_port_eventdev.c b/lib/port/rte_port_eventdev.c index fd7dac9a566..13350fd608f 100644 --- a/lib/port/rte_port_eventdev.c +++ b/lib/port/rte_port_eventdev.c @@ -231,7 +231,7 @@ rte_port_eventdev_writer_tx_bulk(void *port, ((pkts_mask & bsz_mask) ^ bsz_mask); if (expr == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i, n_enq_ok; if (enq_buf_count) @@ -257,7 +257,7 @@ rte_port_eventdev_writer_tx_bulk(void *port, } else { for (; pkts_mask;) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; p->ev[enq_buf_count++].mbuf = pkts[pkt_index]; @@ -463,7 +463,7 @@ rte_port_eventdev_writer_nodrop_tx_bulk(void *port, 
((pkts_mask & bsz_mask) ^ bsz_mask); if (expr == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i, n_enq_ok; if (enq_buf_count) @@ -497,7 +497,7 @@ rte_port_eventdev_writer_nodrop_tx_bulk(void *port, send_burst_nodrop(p); } else { for (; pkts_mask;) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; p->ev[enq_buf_count++].mbuf = pkts[pkt_index]; diff --git a/lib/port/rte_port_fd.c b/lib/port/rte_port_fd.c index 932ecd324e0..7e140793b24 100644 --- a/lib/port/rte_port_fd.c +++ b/lib/port/rte_port_fd.c @@ -239,7 +239,7 @@ rte_port_fd_writer_tx_bulk(void *port, uint32_t tx_buf_count = p->tx_buf_count; if ((pkts_mask & (pkts_mask + 1)) == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i; for (i = 0; i < n_pkts; i++) @@ -247,7 +247,7 @@ rte_port_fd_writer_tx_bulk(void *port, RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(p, n_pkts); } else for ( ; pkts_mask; ) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; @@ -424,7 +424,7 @@ rte_port_fd_writer_nodrop_tx_bulk(void *port, uint32_t tx_buf_count = p->tx_buf_count; if ((pkts_mask & (pkts_mask + 1)) == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i; for (i = 0; i < n_pkts; i++) @@ -432,7 +432,7 @@ rte_port_fd_writer_nodrop_tx_bulk(void *port, RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts); } else for ( ; pkts_mask; ) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; diff --git a/lib/port/rte_port_ras.c b/lib/port/rte_port_ras.c index e5de57da42e..15109661d13 100644 --- a/lib/port/rte_port_ras.c +++ b/lib/port/rte_port_ras.c @@ -234,7 +234,7 @@ rte_port_ring_writer_ras_tx_bulk(void *port, port; if ((pkts_mask & (pkts_mask + 1)) == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i; for (i = 0; i < n_pkts; i++) { @@ -247,7 +247,7 @@ rte_port_ring_writer_ras_tx_bulk(void *port, } } else { for ( ; pkts_mask; ) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; diff --git a/lib/port/rte_port_ring.c b/lib/port/rte_port_ring.c index 52b2d8e557f..002efb7c3ec 100644 --- a/lib/port/rte_port_ring.c +++ b/lib/port/rte_port_ring.c @@ -279,7 +279,7 @@ rte_port_ring_writer_tx_bulk_internal(void *port, ((pkts_mask & bsz_mask) ^ bsz_mask); if (expr == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t n_pkts_ok; if (tx_buf_count) { @@ -305,7 +305,7 @@ rte_port_ring_writer_tx_bulk_internal(void *port, } } else { for ( ; pkts_mask; ) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; @@ -595,7 +595,7 @@ rte_port_ring_writer_nodrop_tx_bulk_internal(void *port, ((pkts_mask & bsz_mask) ^ bsz_mask); if (expr == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t n_pkts_ok; if (tx_buf_count) { @@ -633,7 
+633,7 @@ rte_port_ring_writer_nodrop_tx_bulk_internal(void *port, send_burst_nodrop(p); } else { for ( ; pkts_mask; ) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; diff --git a/lib/port/rte_port_sched.c b/lib/port/rte_port_sched.c index 8a7d815ef32..f6255c4346b 100644 --- a/lib/port/rte_port_sched.c +++ b/lib/port/rte_port_sched.c @@ -191,7 +191,7 @@ rte_port_sched_writer_tx_bulk(void *port, if (expr == 0) { __rte_unused uint32_t nb_tx; - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); if (tx_buf_count) { nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf, @@ -204,7 +204,7 @@ rte_port_sched_writer_tx_bulk(void *port, RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - nb_tx); } else { for ( ; pkts_mask; ) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; diff --git a/lib/port/rte_port_source_sink.c b/lib/port/rte_port_source_sink.c index 7d73adc1e79..ff9677cdfe2 100644 --- a/lib/port/rte_port_source_sink.c +++ b/lib/port/rte_port_source_sink.c @@ -500,7 +500,7 @@ rte_port_sink_tx_bulk(void *port, struct rte_mbuf **pkts, struct rte_port_sink *p = port; if ((pkts_mask & (pkts_mask + 1)) == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i; RTE_PORT_SINK_STATS_PKTS_IN_ADD(p, n_pkts); @@ -523,7 +523,7 @@ rte_port_sink_tx_bulk(void *port, struct rte_mbuf **pkts, uint32_t pkt_index; for ( ; dump_pkts_mask; ) { - pkt_index = __builtin_ctzll( + pkt_index = rte_ctz64( dump_pkts_mask); PCAP_SINK_WRITE_PKT(p, pkts[pkt_index]); dump_pkts_mask &= ~(1LLU << pkt_index); @@ -531,7 +531,7 @@ rte_port_sink_tx_bulk(void *port, struct rte_mbuf **pkts, } for ( ; pkts_mask; ) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; diff --git a/lib/port/rte_port_sym_crypto.c b/lib/port/rte_port_sym_crypto.c index 295984d025c..27b7e07cea4 100644 --- a/lib/port/rte_port_sym_crypto.c +++ b/lib/port/rte_port_sym_crypto.c @@ -235,7 +235,7 @@ rte_port_sym_crypto_writer_tx_bulk(void *port, ((pkts_mask & bsz_mask) ^ bsz_mask); if (expr == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i; RTE_PORT_SYM_CRYPTO_WRITER_STATS_PKTS_IN_ADD(p, n_pkts); @@ -249,7 +249,7 @@ rte_port_sym_crypto_writer_tx_bulk(void *port, send_burst(p); } else { for (; pkts_mask;) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; @@ -447,7 +447,7 @@ rte_port_sym_crypto_writer_nodrop_tx_bulk(void *port, ((pkts_mask & bsz_mask) ^ bsz_mask); if (expr == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i; RTE_PORT_SYM_CRYPTO_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts); @@ -461,7 +461,7 @@ rte_port_sym_crypto_writer_nodrop_tx_bulk(void *port, send_burst_nodrop(p); } else { for ( ; pkts_mask; ) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; diff --git a/lib/power/meson.build 
b/lib/power/meson.build index 1ce8b7c07df..b8426589b24 100644 --- a/lib/power/meson.build +++ b/lib/power/meson.build @@ -14,19 +14,21 @@ endif sources = files( 'guest_channel.c', 'power_acpi_cpufreq.c', + 'power_amd_pstate_cpufreq.c', 'power_common.c', 'power_cppc_cpufreq.c', 'power_kvm_vm.c', + 'power_intel_uncore.c', 'power_pstate_cpufreq.c', 'rte_power.c', - 'rte_power_intel_uncore.c', + 'rte_power_uncore.c', 'rte_power_pmd_mgmt.c', ) headers = files( 'rte_power.h', - 'rte_power_intel_uncore.h', - 'rte_power_pmd_mgmt.h', 'rte_power_guest_channel.h', + 'rte_power_pmd_mgmt.h', + 'rte_power_uncore.h', ) if cc.has_argument('-Wno-cast-qual') cflags += '-Wno-cast-qual' diff --git a/lib/power/power_amd_pstate_cpufreq.c b/lib/power/power_amd_pstate_cpufreq.c new file mode 100644 index 00000000000..dbd9d2b3ee1 --- /dev/null +++ b/lib/power/power_amd_pstate_cpufreq.c @@ -0,0 +1,702 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2021 Intel Corporation + * Copyright(c) 2021 Arm Limited + * Copyright(c) 2023 Amd Limited + */ + +#include + +#include +#include + +#include "power_amd_pstate_cpufreq.h" +#include "power_common.h" + +/* macros used for rounding frequency to nearest 1000 */ +#define FREQ_ROUNDING_DELTA 500 +#define ROUND_FREQ_TO_N_1000 1000 + +#define POWER_CONVERT_TO_DECIMAL 10 + +#define POWER_GOVERNOR_USERSPACE "userspace" +#define POWER_SYSFILE_SETSPEED \ + "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_setspeed" +#define POWER_SYSFILE_SCALING_MAX_FREQ \ + "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_max_freq" +#define POWER_SYSFILE_SCALING_MIN_FREQ \ + "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_min_freq" +#define POWER_SYSFILE_HIGHEST_PERF \ + "/sys/devices/system/cpu/cpu%u/acpi_cppc/highest_perf" +#define POWER_SYSFILE_NOMINAL_PERF \ + "/sys/devices/system/cpu/cpu%u/acpi_cppc/nominal_perf" +#define POWER_SYSFILE_NOMINAL_FREQ \ + "/sys/devices/system/cpu/cpu%u/acpi_cppc/nominal_freq" + +#define POWER_AMD_PSTATE_DRIVER "amd-pstate" +#define BUS_FREQ 1000 /* khz */ + +enum power_state { + POWER_IDLE = 0, + POWER_ONGOING, + POWER_USED, + POWER_UNKNOWN +}; + +/** + * Power info per lcore. + */ +struct amd_pstate_power_info { + uint32_t lcore_id; /**< Logical core id */ + RTE_ATOMIC(uint32_t) state; /**< Power in use state */ + FILE *f; /**< FD of scaling_setspeed */ + char governor_ori[28]; /**< Original governor name */ + uint32_t curr_idx; /**< Freq index in freqs array */ + uint32_t nom_idx; /**< Nominal index in freqs array */ + uint32_t highest_perf; /**< system wide max freq */ + uint32_t nominal_perf; /**< system wide nominal freq */ + uint16_t turbo_available; /**< Turbo Boost available */ + uint16_t turbo_enable; /**< Turbo Boost enable/disable */ + uint32_t nb_freqs; /**< number of available freqs */ + uint32_t freqs[RTE_MAX_LCORE_FREQS]; /**< Frequency array */ +} __rte_cache_aligned; + +static struct amd_pstate_power_info lcore_power_info[RTE_MAX_LCORE]; + +/** + * It is to set specific freq for specific logical core, according to the index + * of supported frequencies. 
+ */ +static int +set_freq_internal(struct amd_pstate_power_info *pi, uint32_t idx) +{ + if (idx >= RTE_MAX_LCORE_FREQS || idx >= pi->nb_freqs) { + RTE_LOG(ERR, POWER, "Invalid frequency index %u, which " + "should be less than %u\n", idx, pi->nb_freqs); + return -1; + } + + /* Check if it is the same as current */ + if (idx == pi->curr_idx) + return 0; + + POWER_DEBUG_TRACE("Frequency[%u] %u to be set for lcore %u\n", + idx, pi->freqs[idx], pi->lcore_id); + if (fseek(pi->f, 0, SEEK_SET) < 0) { + RTE_LOG(ERR, POWER, "Fail to set file position indicator to 0 " + "for setting frequency for lcore %u\n", pi->lcore_id); + return -1; + } + if (fprintf(pi->f, "%u", pi->freqs[idx]) < 0) { + RTE_LOG(ERR, POWER, "Fail to write new frequency for " + "lcore %u\n", pi->lcore_id); + return -1; + } + fflush(pi->f); + pi->curr_idx = idx; + + return 1; +} + +/** + * It is to check the current scaling governor by reading sys file, and then + * set it into 'userspace' if it is not by writing the sys file. The original + * governor will be saved for rolling back. + */ +static int +power_set_governor_userspace(struct amd_pstate_power_info *pi) +{ + return power_set_governor(pi->lcore_id, POWER_GOVERNOR_USERSPACE, + pi->governor_ori, sizeof(pi->governor_ori)); +} + +static int +power_check_turbo(struct amd_pstate_power_info *pi) +{ + FILE *f_nom = NULL, *f_max = NULL; + int ret = -1; + uint32_t nominal_perf = 0, highest_perf = 0; + + open_core_sysfs_file(&f_max, "r", POWER_SYSFILE_HIGHEST_PERF, + pi->lcore_id); + if (f_max == NULL) { + RTE_LOG(ERR, POWER, "failed to open %s\n", + POWER_SYSFILE_HIGHEST_PERF); + goto err; + } + + open_core_sysfs_file(&f_nom, "r", POWER_SYSFILE_NOMINAL_PERF, + pi->lcore_id); + if (f_nom == NULL) { + RTE_LOG(ERR, POWER, "failed to open %s\n", + POWER_SYSFILE_NOMINAL_PERF); + goto err; + } + + ret = read_core_sysfs_u32(f_max, &highest_perf); + if (ret < 0) { + RTE_LOG(ERR, POWER, "Failed to read %s\n", + POWER_SYSFILE_HIGHEST_PERF); + goto err; + } + + ret = read_core_sysfs_u32(f_nom, &nominal_perf); + if (ret < 0) { + RTE_LOG(ERR, POWER, "Failed to read %s\n", + POWER_SYSFILE_NOMINAL_PERF); + goto err; + } + + pi->highest_perf = highest_perf; + pi->nominal_perf = nominal_perf; + + if (highest_perf > nominal_perf) { + pi->turbo_available = 1; + pi->turbo_enable = 1; + ret = 0; + POWER_DEBUG_TRACE("Lcore %u can do Turbo Boost! highest perf %u, " + "nominal perf %u\n", + pi->lcore_id, highest_perf, nominal_perf); + } else { + pi->turbo_available = 0; + pi->turbo_enable = 0; + POWER_DEBUG_TRACE("Lcore %u Turbo not available! highest perf %u, " + "nominal perf %u\n", + pi->lcore_id, highest_perf, nominal_perf); + } + +err: + if (f_max != NULL) + fclose(f_max); + if (f_nom != NULL) + fclose(f_nom); + + return ret; +} + +/** + * It is to get the available frequencies of the specific lcore by reading the + * sys file. 
+ */ +static int +power_get_available_freqs(struct amd_pstate_power_info *pi) +{ + FILE *f_min = NULL, *f_max = NULL, *f_nom = NULL; + int ret = -1, nominal_idx = -1; + uint32_t scaling_min_freq = 0, scaling_max_freq = 0; + uint32_t i, num_freqs = RTE_MAX_LCORE_FREQS; + uint32_t nominal_freq = 0, scaling_freq = 0; + uint32_t freq_calc = 0; + + open_core_sysfs_file(&f_max, "r", POWER_SYSFILE_SCALING_MAX_FREQ, + pi->lcore_id); + if (f_max == NULL) { + RTE_LOG(ERR, POWER, "failed to open %s\n", + POWER_SYSFILE_SCALING_MAX_FREQ); + goto out; + } + + open_core_sysfs_file(&f_min, "r", POWER_SYSFILE_SCALING_MIN_FREQ, + pi->lcore_id); + if (f_min == NULL) { + RTE_LOG(ERR, POWER, "failed to open %s\n", + POWER_SYSFILE_SCALING_MIN_FREQ); + goto out; + } + + open_core_sysfs_file(&f_nom, "r", POWER_SYSFILE_NOMINAL_FREQ, + pi->lcore_id); + if (f_nom == NULL) { + RTE_LOG(ERR, POWER, "failed to open %s\n", + POWER_SYSFILE_NOMINAL_FREQ); + goto out; + } + + ret = read_core_sysfs_u32(f_max, &scaling_max_freq); + if (ret < 0) { + RTE_LOG(ERR, POWER, "Failed to read %s\n", + POWER_SYSFILE_SCALING_MAX_FREQ); + goto out; + } + + ret = read_core_sysfs_u32(f_min, &scaling_min_freq); + if (ret < 0) { + RTE_LOG(ERR, POWER, "Failed to read %s\n", + POWER_SYSFILE_SCALING_MIN_FREQ); + goto out; + } + + ret = read_core_sysfs_u32(f_nom, &nominal_freq); + if (ret < 0) { + RTE_LOG(ERR, POWER, "Failed to read %s\n", + POWER_SYSFILE_NOMINAL_FREQ); + goto out; + } + + power_check_turbo(pi); + + if (scaling_max_freq < scaling_min_freq) { + RTE_LOG(ERR, POWER, "scaling min freq exceeds max freq, " + "not expected! Check system power policy\n"); + goto out; + } else if (scaling_max_freq == scaling_min_freq) { + num_freqs = 1; + } + + if (num_freqs > 1) { + scaling_freq = (scaling_max_freq - scaling_min_freq); + scaling_freq <<= 10; + scaling_freq /= (num_freqs - 1); + scaling_freq >>= 10; + } else { + scaling_freq = 0; + } + + /* Generate the freq bucket array. */ + for (i = 0, pi->nb_freqs = 0; i < num_freqs; i++) { + freq_calc = scaling_max_freq - (i * scaling_freq); + /* convert the frequency to nearest 1000 value + * Ex: if freq=1396789 then freq_conv=1397000 + * Ex: if freq=800030 then freq_conv=800000 + */ + freq_calc = (freq_calc + FREQ_ROUNDING_DELTA) + / ROUND_FREQ_TO_N_1000; + freq_calc = freq_calc * ROUND_FREQ_TO_N_1000; + + /* update the frequency table only if required */ + if ((pi->nb_freqs == 0) || + pi->freqs[pi->nb_freqs-1] != freq_calc) { + pi->freqs[pi->nb_freqs++] = freq_calc; + } + if (nominal_idx == -1) { + if ((nominal_freq * BUS_FREQ) >= freq_calc) { + pi->nom_idx = pi->nb_freqs - 1; + nominal_idx = pi->nom_idx; + } + } + } + + ret = 0; + + POWER_DEBUG_TRACE("%d frequency(s) of lcore %u are available\n", + num_freqs, pi->lcore_id); + +out: + if (f_min != NULL) + fclose(f_min); + if (f_max != NULL) + fclose(f_max); + if (f_nom != NULL) + fclose(f_nom); + + return ret; +} + +/** + * It is to fopen the sys file for the future setting the lcore frequency. 
+ */ +static int +power_init_for_setting_freq(struct amd_pstate_power_info *pi) +{ + FILE *f = NULL; + char buf[BUFSIZ]; + uint32_t i, freq; + int ret; + + open_core_sysfs_file(&f, "rw+", POWER_SYSFILE_SETSPEED, pi->lcore_id); + if (f == NULL) { + RTE_LOG(ERR, POWER, "failed to open %s\n", + POWER_SYSFILE_SETSPEED); + goto err; + } + + ret = read_core_sysfs_s(f, buf, sizeof(buf)); + if (ret < 0) { + RTE_LOG(ERR, POWER, "Failed to read %s\n", + POWER_SYSFILE_SETSPEED); + goto err; + } + + freq = strtoul(buf, NULL, POWER_CONVERT_TO_DECIMAL); + + /* convert the frequency to nearest 1000 value + * Ex: if freq=1396789 then freq_conv=1397000 + * Ex: if freq=800030 then freq_conv=800000 + */ + unsigned int freq_conv = 0; + freq_conv = (freq + FREQ_ROUNDING_DELTA) + / ROUND_FREQ_TO_N_1000; + freq_conv = freq_conv * ROUND_FREQ_TO_N_1000; + + for (i = 0; i < pi->nb_freqs; i++) { + if (freq_conv == pi->freqs[i]) { + pi->curr_idx = i; + pi->f = f; + return 0; + } + } + +err: + if (f != NULL) + fclose(f); + + return -1; +} + +int +power_amd_pstate_cpufreq_check_supported(void) +{ + return cpufreq_check_scaling_driver(POWER_AMD_PSTATE_DRIVER); +} + +int +power_amd_pstate_cpufreq_init(unsigned int lcore_id) +{ + struct amd_pstate_power_info *pi; + uint32_t exp_state; + + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(ERR, POWER, "Lcore id %u can not exceeds %u\n", + lcore_id, RTE_MAX_LCORE - 1U); + return -1; + } + + pi = &lcore_power_info[lcore_id]; + exp_state = POWER_IDLE; + /* The power in use state works as a guard variable between + * the CPU frequency control initialization and exit process. + * The ACQUIRE memory ordering here pairs with the RELEASE + * ordering below as lock to make sure the frequency operations + * in the critical section are done under the correct state. + */ + if (!rte_atomic_compare_exchange_strong_explicit(&(pi->state), + &exp_state, POWER_ONGOING, + rte_memory_order_acquire, rte_memory_order_relaxed)) { + RTE_LOG(INFO, POWER, "Power management of lcore %u is " + "in use\n", lcore_id); + return -1; + } + + pi->lcore_id = lcore_id; + /* Check and set the governor */ + if (power_set_governor_userspace(pi) < 0) { + RTE_LOG(ERR, POWER, "Cannot set governor of lcore %u to " + "userspace\n", lcore_id); + goto fail; + } + + /* Get the available frequencies */ + if (power_get_available_freqs(pi) < 0) { + RTE_LOG(ERR, POWER, "Cannot get available frequencies of " + "lcore %u\n", lcore_id); + goto fail; + } + + /* Init for setting lcore frequency */ + if (power_init_for_setting_freq(pi) < 0) { + RTE_LOG(ERR, POWER, "Cannot init for setting frequency for " + "lcore %u\n", lcore_id); + goto fail; + } + + /* Set freq to max by default */ + if (power_amd_pstate_cpufreq_freq_max(lcore_id) < 0) { + RTE_LOG(ERR, POWER, "Cannot set frequency of lcore %u " + "to max\n", lcore_id); + goto fail; + } + + RTE_LOG(INFO, POWER, "Initialized successfully for lcore %u " + "power management\n", lcore_id); + + rte_atomic_store_explicit(&(pi->state), POWER_USED, rte_memory_order_release); + + return 0; + +fail: + rte_atomic_store_explicit(&(pi->state), POWER_UNKNOWN, rte_memory_order_release); + return -1; +} + +/** + * It is to check the governor and then set the original governor back if + * needed by writing the sys file. 
+ */ +static int +power_set_governor_original(struct amd_pstate_power_info *pi) +{ + return power_set_governor(pi->lcore_id, pi->governor_ori, NULL, 0); +} + +int +power_amd_pstate_cpufreq_exit(unsigned int lcore_id) +{ + struct amd_pstate_power_info *pi; + uint32_t exp_state; + + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(ERR, POWER, "Lcore id %u can not exceeds %u\n", + lcore_id, RTE_MAX_LCORE - 1U); + return -1; + } + pi = &lcore_power_info[lcore_id]; + exp_state = POWER_USED; + /* The power in use state works as a guard variable between + * the CPU frequency control initialization and exit process. + * The ACQUIRE memory ordering here pairs with the RELEASE + * ordering below as lock to make sure the frequency operations + * in the critical section are done under the correct state. + */ + if (!rte_atomic_compare_exchange_strong_explicit(&(pi->state), + &exp_state, POWER_ONGOING, + rte_memory_order_acquire, rte_memory_order_relaxed)) { + RTE_LOG(INFO, POWER, "Power management of lcore %u is " + "not used\n", lcore_id); + return -1; + } + + /* Close FD of setting freq */ + fclose(pi->f); + pi->f = NULL; + + /* Set the governor back to the original */ + if (power_set_governor_original(pi) < 0) { + RTE_LOG(ERR, POWER, "Cannot set the governor of %u back " + "to the original\n", lcore_id); + goto fail; + } + + RTE_LOG(INFO, POWER, "Power management of lcore %u has exited from " + "'userspace' mode and been set back to the " + "original\n", lcore_id); + rte_atomic_store_explicit(&(pi->state), POWER_IDLE, rte_memory_order_release); + + return 0; + +fail: + rte_atomic_store_explicit(&(pi->state), POWER_UNKNOWN, rte_memory_order_release); + + return -1; +} + +uint32_t +power_amd_pstate_cpufreq_freqs(unsigned int lcore_id, uint32_t *freqs, uint32_t num) +{ + struct amd_pstate_power_info *pi; + + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(ERR, POWER, "Invalid lcore ID\n"); + return 0; + } + + if (freqs == NULL) { + RTE_LOG(ERR, POWER, "NULL buffer supplied\n"); + return 0; + } + + pi = &lcore_power_info[lcore_id]; + if (num < pi->nb_freqs) { + RTE_LOG(ERR, POWER, "Buffer size is not enough\n"); + return 0; + } + rte_memcpy(freqs, pi->freqs, pi->nb_freqs * sizeof(uint32_t)); + + return pi->nb_freqs; +} + +uint32_t +power_amd_pstate_cpufreq_get_freq(unsigned int lcore_id) +{ + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(ERR, POWER, "Invalid lcore ID\n"); + return RTE_POWER_INVALID_FREQ_INDEX; + } + + return lcore_power_info[lcore_id].curr_idx; +} + +int +power_amd_pstate_cpufreq_set_freq(unsigned int lcore_id, uint32_t index) +{ + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(ERR, POWER, "Invalid lcore ID\n"); + return -1; + } + + return set_freq_internal(&(lcore_power_info[lcore_id]), index); +} + +int +power_amd_pstate_cpufreq_freq_down(unsigned int lcore_id) +{ + struct amd_pstate_power_info *pi; + + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(ERR, POWER, "Invalid lcore ID\n"); + return -1; + } + + pi = &lcore_power_info[lcore_id]; + if (pi->curr_idx + 1 == pi->nb_freqs) + return 0; + + /* Frequencies in the array are from high to low. */ + return set_freq_internal(pi, pi->curr_idx + 1); +} + +int +power_amd_pstate_cpufreq_freq_up(unsigned int lcore_id) +{ + struct amd_pstate_power_info *pi; + + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(ERR, POWER, "Invalid lcore ID\n"); + return -1; + } + + pi = &lcore_power_info[lcore_id]; + if (pi->curr_idx == 0 || (pi->curr_idx == pi->nom_idx && + pi->turbo_available && !pi->turbo_enable)) + return 0; + + /* Frequencies in the array are from high to low. 
*/ + return set_freq_internal(pi, pi->curr_idx - 1); +} + +int +power_amd_pstate_cpufreq_freq_max(unsigned int lcore_id) +{ + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(ERR, POWER, "Invalid lcore ID\n"); + return -1; + } + + /* Frequencies in the array are from high to low. */ + if (lcore_power_info[lcore_id].turbo_available) { + if (lcore_power_info[lcore_id].turbo_enable) + /* Set to Turbo */ + return set_freq_internal( + &lcore_power_info[lcore_id], 0); + else + /* Set to max non-turbo */ + return set_freq_internal( + &lcore_power_info[lcore_id], + lcore_power_info[lcore_id].nom_idx); + } else + return set_freq_internal(&lcore_power_info[lcore_id], 0); +} + +int +power_amd_pstate_cpufreq_freq_min(unsigned int lcore_id) +{ + struct amd_pstate_power_info *pi; + + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(ERR, POWER, "Invalid lcore ID\n"); + return -1; + } + + pi = &lcore_power_info[lcore_id]; + + /* Frequencies in the array are from high to low. */ + return set_freq_internal(pi, pi->nb_freqs - 1); +} + +int +power_amd_pstate_turbo_status(unsigned int lcore_id) +{ + struct amd_pstate_power_info *pi; + + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(ERR, POWER, "Invalid lcore ID\n"); + return -1; + } + + pi = &lcore_power_info[lcore_id]; + + return pi->turbo_enable; +} + +int +power_amd_pstate_enable_turbo(unsigned int lcore_id) +{ + struct amd_pstate_power_info *pi; + + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(ERR, POWER, "Invalid lcore ID\n"); + return -1; + } + + pi = &lcore_power_info[lcore_id]; + + if (pi->turbo_available) + pi->turbo_enable = 1; + else { + pi->turbo_enable = 0; + RTE_LOG(ERR, POWER, + "Failed to enable turbo on lcore %u\n", + lcore_id); + return -1; + } + + /* TODO: must set to max once enabling Turbo? Considering add condition: + * if ((pi->turbo_available) && (pi->curr_idx <= 1)) + */ + /* Max may have changed, so call to max function */ + if (power_amd_pstate_cpufreq_freq_max(lcore_id) < 0) { + RTE_LOG(ERR, POWER, + "Failed to set frequency of lcore %u to max\n", + lcore_id); + return -1; + } + + return 0; +} + +int +power_amd_pstate_disable_turbo(unsigned int lcore_id) +{ + struct amd_pstate_power_info *pi; + + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(ERR, POWER, "Invalid lcore ID\n"); + return -1; + } + + pi = &lcore_power_info[lcore_id]; + + pi->turbo_enable = 0; + + if ((pi->turbo_available) && (pi->curr_idx <= pi->nom_idx)) { + /* Try to set freq to max by default coming out of turbo */ + if (power_amd_pstate_cpufreq_freq_max(lcore_id) < 0) { + RTE_LOG(ERR, POWER, + "Failed to set frequency of lcore %u to max\n", + lcore_id); + return -1; + } + } + + return 0; +} + +int +power_amd_pstate_get_capabilities(unsigned int lcore_id, + struct rte_power_core_capabilities *caps) +{ + struct amd_pstate_power_info *pi; + + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(ERR, POWER, "Invalid lcore ID\n"); + return -1; + } + if (caps == NULL) { + RTE_LOG(ERR, POWER, "Invalid argument\n"); + return -1; + } + + pi = &lcore_power_info[lcore_id]; + caps->capabilities = 0; + caps->turbo = !!(pi->turbo_available); + + return 0; +} diff --git a/lib/power/power_amd_pstate_cpufreq.h b/lib/power/power_amd_pstate_cpufreq.h new file mode 100644 index 00000000000..b02f9f98e4a --- /dev/null +++ b/lib/power/power_amd_pstate_cpufreq.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2021 Intel Corporation + * Copyright(c) 2021 Arm Limited + * Copyright(c) 2023 Amd Limited + */ + +#ifndef _POWER_AMD_PSTATE_CPUFREQ_H +#define _POWER_AMD_PSTATE_CPUFREQ_H + +/** 
+ * @file + * RTE Power Management via userspace AMD pstate cpufreq + */ + +#include "rte_power.h" + +/** + * Check if amd p-state power management is supported. + * + * @return + * - 1 if supported + * - 0 if unsupported + * - -1 if error, with rte_errno indicating reason for error. + */ +int power_amd_pstate_cpufreq_check_supported(void); + +/** + * Initialize power management for a specific lcore. It will check and set the + * governor to userspace for the lcore, get the available frequencies, and + * prepare to set new lcore frequency. + * + * @param lcore_id + * lcore id. + * + * @return + * - 0 on success. + * - Negative on error. + */ +int power_amd_pstate_cpufreq_init(unsigned int lcore_id); + +/** + * Exit power management on a specific lcore. It will set the governor to which + * is before initialized. + * + * @param lcore_id + * lcore id. + * + * @return + * - 0 on success. + * - Negative on error. + */ +int power_amd_pstate_cpufreq_exit(unsigned int lcore_id); + +/** + * Get the available frequencies of a specific lcore. The return value will be + * the minimal one of the total number of available frequencies and the number + * of buffer. The index of available frequencies used in other interfaces + * should be in the range of 0 to this return value. + * It should be protected outside of this function for threadsafe. + * + * @param lcore_id + * lcore id. + * @param freqs + * The buffer array to save the frequencies. + * @param num + * The number of frequencies to get. + * + * @return + * The number of available frequencies. + */ +uint32_t power_amd_pstate_cpufreq_freqs(unsigned int lcore_id, uint32_t *freqs, + uint32_t num); + +/** + * Return the current index of available frequencies of a specific lcore. It + * will return 'RTE_POWER_INVALID_FREQ_INDEX = (~0)' if error. + * It should be protected outside of this function for threadsafe. + * + * @param lcore_id + * lcore id. + * + * @return + * The current index of available frequencies. + */ +uint32_t power_amd_pstate_cpufreq_get_freq(unsigned int lcore_id); + +/** + * Set the new frequency for a specific lcore by indicating the index of + * available frequencies. + * It should be protected outside of this function for threadsafe. + * + * @param lcore_id + * lcore id. + * @param index + * The index of available frequencies. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +int power_amd_pstate_cpufreq_set_freq(unsigned int lcore_id, uint32_t index); + +/** + * Scale up the frequency of a specific lcore according to the available + * frequencies. + * It should be protected outside of this function for threadsafe. + * + * @param lcore_id + * lcore id. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +int power_amd_pstate_cpufreq_freq_up(unsigned int lcore_id); + +/** + * Scale down the frequency of a specific lcore according to the available + * frequencies. + * It should be protected outside of this function for threadsafe. + * + * @param lcore_id + * lcore id. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +int power_amd_pstate_cpufreq_freq_down(unsigned int lcore_id); + +/** + * Scale up the frequency of a specific lcore to the highest according to the + * available frequencies. + * It should be protected outside of this function for threadsafe. 
+ * + * @param lcore_id + * lcore id. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +int power_amd_pstate_cpufreq_freq_max(unsigned int lcore_id); + +/** + * Scale down the frequency of a specific lcore to the lowest according to the + * available frequencies. + * It should be protected outside of this function for threadsafe. + * + * @param lcore_id + * lcore id. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +int power_amd_pstate_cpufreq_freq_min(unsigned int lcore_id); + +/** + * Get the turbo status of a specific lcore. + * It should be protected outside of this function for threadsafe. + * + * @param lcore_id + * lcore id. + * + * @return + * - 1 Turbo Boost is enabled on this lcore. + * - 0 Turbo Boost is disabled on this lcore. + * - Negative on error. + */ +int power_amd_pstate_turbo_status(unsigned int lcore_id); + +/** + * Enable Turbo Boost on a specific lcore. + * It should be protected outside of this function for threadsafe. + * + * @param lcore_id + * lcore id. + * + * @return + * - 0 Turbo Boost is enabled successfully on this lcore. + * - Negative on error. + */ +int power_amd_pstate_enable_turbo(unsigned int lcore_id); + +/** + * Disable Turbo Boost on a specific lcore. + * It should be protected outside of this function for threadsafe. + * + * @param lcore_id + * lcore id. + * + * @return + * - 0 Turbo Boost disabled successfully on this lcore. + * - Negative on error. + */ +int power_amd_pstate_disable_turbo(unsigned int lcore_id); + +/** + * Returns power capabilities for a specific lcore. + * + * @param lcore_id + * lcore id. + * @param caps + * pointer to rte_power_core_capabilities object. + * + * @return + * - 0 on success. + * - Negative on error. 
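Applications are not expected to call these power_amd_pstate_* functions directly; they reach them through the generic rte_power hooks that this patch wires up in rte_power.c further below. A minimal caller-side sketch (editor's illustration; the lcore id is the caller's choice, the 64-entry buffer is an arbitrary bound, and error handling is trimmed):

#include <rte_common.h>
#include <rte_power.h>

static int
use_amd_pstate(unsigned int lcore_id)
{
	struct rte_power_core_capabilities caps;
	uint32_t freqs[64];	/* arbitrary bound for the sketch */

	/* Select the new backend explicitly; rte_power_init() would also
	 * auto-detect it when the amd-pstate scaling driver is active.
	 */
	if (rte_power_set_env(PM_ENV_AMD_PSTATE_CPUFREQ) < 0)
		return -1;
	if (rte_power_init(lcore_id) < 0)
		return -1;

	/* freqs[0] is the highest bucket; the table is ordered high to low. */
	if (rte_power_freqs(lcore_id, freqs, RTE_DIM(freqs)) == 0)
		return -1;

	if (rte_power_get_capabilities(lcore_id, &caps) == 0 && caps.turbo)
		rte_power_freq_enable_turbo(lcore_id);

	rte_power_freq_min(lcore_id);	/* scale down to the lowest bucket */
	rte_power_freq_max(lcore_id);	/* back to max (turbo when enabled) */

	return rte_power_exit(lcore_id);
}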
+ */ +int power_amd_pstate_get_capabilities(unsigned int lcore_id, + struct rte_power_core_capabilities *caps); + +#endif /* _POWER_AMD_PSTATE_CPUFREQ_H */ diff --git a/lib/power/rte_power_intel_uncore.c b/lib/power/power_intel_uncore.c similarity index 88% rename from lib/power/rte_power_intel_uncore.c rename to lib/power/power_intel_uncore.c index 3b8724385fb..688aebc4ee5 100644 --- a/lib/power/rte_power_intel_uncore.c +++ b/lib/power/power_intel_uncore.c @@ -8,7 +8,7 @@ #include -#include "rte_power_intel_uncore.h" +#include "power_intel_uncore.h" #include "power_common.h" #define MAX_UNCORE_FREQS 32 @@ -246,7 +246,7 @@ static int check_pkg_die_values(unsigned int pkg, unsigned int die) { unsigned int max_pkgs, max_dies; - max_pkgs = rte_power_uncore_get_num_pkgs(); + max_pkgs = power_intel_uncore_get_num_pkgs(); if (max_pkgs == 0) return -1; if (pkg >= max_pkgs) { @@ -255,7 +255,7 @@ check_pkg_die_values(unsigned int pkg, unsigned int die) return -1; } - max_dies = rte_power_uncore_get_num_dies(pkg); + max_dies = power_intel_uncore_get_num_dies(pkg); if (max_dies == 0) return -1; if (die >= max_dies) { @@ -268,7 +268,7 @@ check_pkg_die_values(unsigned int pkg, unsigned int die) } int -rte_power_uncore_init(unsigned int pkg, unsigned int die) +power_intel_uncore_init(unsigned int pkg, unsigned int die) { struct uncore_power_info *ui; @@ -298,7 +298,7 @@ rte_power_uncore_init(unsigned int pkg, unsigned int die) } int -rte_power_uncore_exit(unsigned int pkg, unsigned int die) +power_intel_uncore_exit(unsigned int pkg, unsigned int die) { struct uncore_power_info *ui; @@ -333,7 +333,7 @@ rte_power_uncore_exit(unsigned int pkg, unsigned int die) } uint32_t -rte_power_get_uncore_freq(unsigned int pkg, unsigned int die) +power_get_intel_uncore_freq(unsigned int pkg, unsigned int die) { int ret = check_pkg_die_values(pkg, die); if (ret < 0) @@ -343,7 +343,7 @@ rte_power_get_uncore_freq(unsigned int pkg, unsigned int die) } int -rte_power_set_uncore_freq(unsigned int pkg, unsigned int die, uint32_t index) +power_set_intel_uncore_freq(unsigned int pkg, unsigned int die, uint32_t index) { int ret = check_pkg_die_values(pkg, die); if (ret < 0) @@ -353,7 +353,7 @@ rte_power_set_uncore_freq(unsigned int pkg, unsigned int die, uint32_t index) } int -rte_power_uncore_freq_max(unsigned int pkg, unsigned int die) +power_intel_uncore_freq_max(unsigned int pkg, unsigned int die) { int ret = check_pkg_die_values(pkg, die); if (ret < 0) @@ -364,7 +364,7 @@ rte_power_uncore_freq_max(unsigned int pkg, unsigned int die) int -rte_power_uncore_freq_min(unsigned int pkg, unsigned int die) +power_intel_uncore_freq_min(unsigned int pkg, unsigned int die) { int ret = check_pkg_die_values(pkg, die); if (ret < 0) @@ -376,7 +376,31 @@ rte_power_uncore_freq_min(unsigned int pkg, unsigned int die) } int -rte_power_uncore_get_num_freqs(unsigned int pkg, unsigned int die) +power_intel_uncore_freqs(unsigned int pkg, unsigned int die, uint32_t *freqs, uint32_t num) +{ + struct uncore_power_info *ui; + + int ret = check_pkg_die_values(pkg, die); + if (ret < 0) + return -1; + + if (freqs == NULL) { + RTE_LOG(ERR, POWER, "NULL buffer supplied\n"); + return 0; + } + + ui = &uncore_info[pkg][die]; + if (num < ui->nb_freqs) { + RTE_LOG(ERR, POWER, "Buffer size is not enough\n"); + return 0; + } + rte_memcpy(freqs, ui->freqs, ui->nb_freqs * sizeof(uint32_t)); + + return ui->nb_freqs; +} + +int +power_intel_uncore_get_num_freqs(unsigned int pkg, unsigned int die) { int ret = check_pkg_die_values(pkg, die); if (ret < 0) @@ -386,7
+410,7 @@ rte_power_uncore_get_num_freqs(unsigned int pkg, unsigned int die) } unsigned int -rte_power_uncore_get_num_pkgs(void) +power_intel_uncore_get_num_pkgs(void) { DIR *d; struct dirent *dir; @@ -397,7 +421,7 @@ rte_power_uncore_get_num_pkgs(void) if (d == NULL) { RTE_LOG(ERR, POWER, "Uncore frequency management not supported/enabled on this kernel. " - "Please enable CONFIG_INTEL_UNCORE_FREQ_CONTROL if on x86 with linux kernel" + "Please enable CONFIG_INTEL_UNCORE_FREQ_CONTROL if on Intel x86 with linux kernel" " >= 5.6\n"); return 0; } @@ -416,14 +440,14 @@ rte_power_uncore_get_num_pkgs(void) } unsigned int -rte_power_uncore_get_num_dies(unsigned int pkg) +power_intel_uncore_get_num_dies(unsigned int pkg) { DIR *d; struct dirent *dir; unsigned int count = 0, max_pkgs; char filter[FILTER_LENGTH]; - max_pkgs = rte_power_uncore_get_num_pkgs(); + max_pkgs = power_intel_uncore_get_num_pkgs(); if (max_pkgs == 0) return 0; if (pkg >= max_pkgs) { @@ -435,7 +459,7 @@ rte_power_uncore_get_num_dies(unsigned int pkg) if (d == NULL) { RTE_LOG(ERR, POWER, "Uncore frequency management not supported/enabled on this kernel. " - "Please enable CONFIG_INTEL_UNCORE_FREQ_CONTROL if on x86 with linux kernel" + "Please enable CONFIG_INTEL_UNCORE_FREQ_CONTROL if on Intel x86 with linux kernel" " >= 5.6\n"); return 0; } diff --git a/lib/power/rte_power_intel_uncore.h b/lib/power/power_intel_uncore.h similarity index 77% rename from lib/power/rte_power_intel_uncore.h rename to lib/power/power_intel_uncore.h index 0bd9f193a17..20a3ba8ebe3 100644 --- a/lib/power/rte_power_intel_uncore.h +++ b/lib/power/power_intel_uncore.h @@ -2,16 +2,16 @@ * Copyright(c) 2022 Intel Corporation */ -#ifndef RTE_POWER_INTEL_UNCORE_H -#define RTE_POWER_INTEL_UNCORE_H +#ifndef POWER_INTEL_UNCORE_H +#define POWER_INTEL_UNCORE_H /** * @file * RTE Intel Uncore Frequency Management */ -#include #include "rte_power.h" +#include "rte_power_uncore.h" #ifdef __cplusplus extern "C" { @@ -34,9 +34,8 @@ extern "C" { * - 0 on success. * - Negative on error. */ -__rte_experimental int -rte_power_uncore_init(unsigned int pkg, unsigned int die); +power_intel_uncore_init(unsigned int pkg, unsigned int die); /** * Exit uncore frequency management on a specific die on a package. @@ -56,9 +55,8 @@ rte_power_uncore_init(unsigned int pkg, unsigned int die); * - 0 on success. * - Negative on error. */ -__rte_experimental int -rte_power_uncore_exit(unsigned int pkg, unsigned int die); +power_intel_uncore_exit(unsigned int pkg, unsigned int die); /** * Return the current index of available frequencies of a specific die on a package. @@ -77,9 +75,8 @@ rte_power_uncore_exit(unsigned int pkg, unsigned int die); * The current index of available frequencies. * If error, it will return 'RTE_POWER_INVALID_FREQ_INDEX = (~0)'. */ -__rte_experimental uint32_t -rte_power_get_uncore_freq(unsigned int pkg, unsigned int die); +power_get_intel_uncore_freq(unsigned int pkg, unsigned int die); /** * Set minimum and maximum uncore frequency for specified die on a package @@ -102,9 +99,8 @@ rte_power_get_uncore_freq(unsigned int pkg, unsigned int die); * - 0 on success without frequency changed. * - Negative on error. 
*/ -__rte_experimental int -rte_power_set_uncore_freq(unsigned int pkg, unsigned int die, uint32_t index); +power_set_intel_uncore_freq(unsigned int pkg, unsigned int die, uint32_t index); /** * Set minimum and maximum uncore frequency for specified die on a package @@ -125,9 +121,8 @@ rte_power_set_uncore_freq(unsigned int pkg, unsigned int die, uint32_t index); * - 0 on success without frequency changed. * - Negative on error. */ -__rte_experimental int -rte_power_uncore_freq_max(unsigned int pkg, unsigned int die); +power_intel_uncore_freq_max(unsigned int pkg, unsigned int die); /** * Set minimum and maximum uncore frequency for specified die on a package @@ -148,9 +143,32 @@ rte_power_uncore_freq_max(unsigned int pkg, unsigned int die); * - 0 on success without frequency changed. * - Negative on error. */ -__rte_experimental int -rte_power_uncore_freq_min(unsigned int pkg, unsigned int die); +power_intel_uncore_freq_min(unsigned int pkg, unsigned int die); + +/** + * Return the list of available frequencies in the index array. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * @param freqs + * The buffer array to save the frequencies. + * @param num + * The number of frequencies to get. + * + * @return + * - The number of available index's in frequency array. + * - Negative on error. + */ +int +power_intel_uncore_freqs(unsigned int pkg, unsigned int die, + unsigned int *freqs, unsigned int num); /** * Return the list length of available frequencies in the index array. @@ -168,9 +186,8 @@ rte_power_uncore_freq_min(unsigned int pkg, unsigned int die); * - The number of available index's in frequency array. * - Negative on error. */ -__rte_experimental int -rte_power_uncore_get_num_freqs(unsigned int pkg, unsigned int die); +power_intel_uncore_get_num_freqs(unsigned int pkg, unsigned int die); /** * Return the number of packages (CPUs) on a system @@ -182,9 +199,8 @@ rte_power_uncore_get_num_freqs(unsigned int pkg, unsigned int die); * - Zero on error. * - Number of package on system on success. */ -__rte_experimental unsigned int -rte_power_uncore_get_num_pkgs(void); +power_intel_uncore_get_num_pkgs(void); /** * Return the number of dies for pakckages (CPUs) specified @@ -200,12 +216,11 @@ rte_power_uncore_get_num_pkgs(void); * - Zero on error. * - Number of dies for package on sucecss. 
*/ -__rte_experimental unsigned int -rte_power_uncore_get_num_dies(unsigned int pkg); +power_intel_uncore_get_num_dies(unsigned int pkg); #ifdef __cplusplus } #endif -#endif /* RTE_POWER_INTEL_UNCORE_H */ +#endif /* POWER_INTEL_UNCORE_H */ diff --git a/lib/power/rte_power.c b/lib/power/rte_power.c index 63a43bd8f5a..48c2e6b428d 100644 --- a/lib/power/rte_power.c +++ b/lib/power/rte_power.c @@ -12,6 +12,7 @@ #include "power_cppc_cpufreq.h" #include "power_kvm_vm.h" #include "power_pstate_cpufreq.h" +#include "power_amd_pstate_cpufreq.h" enum power_management_env global_default_env = PM_ENV_NOT_SET; @@ -58,6 +59,8 @@ rte_power_check_env_supported(enum power_management_env env) return power_kvm_vm_check_supported(); case PM_ENV_CPPC_CPUFREQ: return power_cppc_cpufreq_check_supported(); + case PM_ENV_AMD_PSTATE_CPUFREQ: + return power_amd_pstate_cpufreq_check_supported(); default: rte_errno = EINVAL; return -1; @@ -126,6 +129,18 @@ rte_power_set_env(enum power_management_env env) rte_power_freq_enable_turbo = power_cppc_enable_turbo; rte_power_freq_disable_turbo = power_cppc_disable_turbo; rte_power_get_capabilities = power_cppc_get_capabilities; + } else if (env == PM_ENV_AMD_PSTATE_CPUFREQ) { + rte_power_freqs = power_amd_pstate_cpufreq_freqs; + rte_power_get_freq = power_amd_pstate_cpufreq_get_freq; + rte_power_set_freq = power_amd_pstate_cpufreq_set_freq; + rte_power_freq_up = power_amd_pstate_cpufreq_freq_up; + rte_power_freq_down = power_amd_pstate_cpufreq_freq_down; + rte_power_freq_min = power_amd_pstate_cpufreq_freq_min; + rte_power_freq_max = power_amd_pstate_cpufreq_freq_max; + rte_power_turbo_status = power_amd_pstate_turbo_status; + rte_power_freq_enable_turbo = power_amd_pstate_enable_turbo; + rte_power_freq_disable_turbo = power_amd_pstate_disable_turbo; + rte_power_get_capabilities = power_amd_pstate_get_capabilities; } else { RTE_LOG(ERR, POWER, "Invalid Power Management Environment(%d) set\n", env); @@ -171,6 +186,8 @@ rte_power_init(unsigned int lcore_id) return power_pstate_cpufreq_init(lcore_id); case PM_ENV_CPPC_CPUFREQ: return power_cppc_cpufreq_init(lcore_id); + case PM_ENV_AMD_PSTATE_CPUFREQ: + return power_amd_pstate_cpufreq_init(lcore_id); default: RTE_LOG(INFO, POWER, "Env isn't set yet!\n"); } @@ -190,6 +207,13 @@ rte_power_init(unsigned int lcore_id) goto out; } + RTE_LOG(INFO, POWER, "Attempting to initialise AMD PSTATE power management...\n"); + ret = power_amd_pstate_cpufreq_init(lcore_id); + if (ret == 0) { + rte_power_set_env(PM_ENV_AMD_PSTATE_CPUFREQ); + goto out; + } + RTE_LOG(INFO, POWER, "Attempting to initialise CPPC power management...\n"); ret = power_cppc_cpufreq_init(lcore_id); if (ret == 0) { @@ -221,6 +245,8 @@ rte_power_exit(unsigned int lcore_id) return power_pstate_cpufreq_exit(lcore_id); case PM_ENV_CPPC_CPUFREQ: return power_cppc_cpufreq_exit(lcore_id); + case PM_ENV_AMD_PSTATE_CPUFREQ: + return power_amd_pstate_cpufreq_exit(lcore_id); default: RTE_LOG(ERR, POWER, "Environment has not been set, unable to exit gracefully\n"); diff --git a/lib/power/rte_power.h b/lib/power/rte_power.h index 4d70d9a8d21..e79bf1c4dd0 100644 --- a/lib/power/rte_power.h +++ b/lib/power/rte_power.h @@ -21,7 +21,8 @@ extern "C" { /* Power Management Environment State */ enum power_management_env {PM_ENV_NOT_SET, PM_ENV_ACPI_CPUFREQ, PM_ENV_KVM_VM, - PM_ENV_PSTATE_CPUFREQ, PM_ENV_CPPC_CPUFREQ}; + PM_ENV_PSTATE_CPUFREQ, PM_ENV_CPPC_CPUFREQ, + PM_ENV_AMD_PSTATE_CPUFREQ}; /** * @warning diff --git a/lib/power/rte_power_pmd_mgmt.c b/lib/power/rte_power_pmd_mgmt.c index 
ca1840387c7..38f83840856 100644 --- a/lib/power/rte_power_pmd_mgmt.c +++ b/lib/power/rte_power_pmd_mgmt.c @@ -421,7 +421,8 @@ check_scale(unsigned int lcore) /* only PSTATE and ACPI modes are supported */ if (!rte_power_check_env_supported(PM_ENV_ACPI_CPUFREQ) && - !rte_power_check_env_supported(PM_ENV_PSTATE_CPUFREQ)) { + !rte_power_check_env_supported(PM_ENV_PSTATE_CPUFREQ) && + !rte_power_check_env_supported(PM_ENV_AMD_PSTATE_CPUFREQ)) { RTE_LOG(DEBUG, POWER, "Neither ACPI nor PSTATE modes are supported\n"); return -ENOTSUP; } @@ -431,7 +432,8 @@ check_scale(unsigned int lcore) /* ensure we initialized the correct env */ env = rte_power_get_env(); - if (env != PM_ENV_ACPI_CPUFREQ && env != PM_ENV_PSTATE_CPUFREQ) { + if (env != PM_ENV_ACPI_CPUFREQ && env != PM_ENV_PSTATE_CPUFREQ && + env != PM_ENV_AMD_PSTATE_CPUFREQ) { RTE_LOG(DEBUG, POWER, "Neither ACPI nor PSTATE modes were initialized\n"); return -ENOTSUP; } diff --git a/lib/power/rte_power_uncore.c b/lib/power/rte_power_uncore.c new file mode 100644 index 00000000000..78a823d34cc --- /dev/null +++ b/lib/power/rte_power_uncore.c @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + * Copyright(c) 2023 AMD Corporation + */ + +#include + +#include +#include + +#include "rte_power_uncore.h" +#include "power_intel_uncore.h" + +enum rte_uncore_power_mgmt_env default_uncore_env = RTE_UNCORE_PM_ENV_NOT_SET; + +static rte_spinlock_t global_env_cfg_lock = RTE_SPINLOCK_INITIALIZER; + +static uint32_t +power_get_dummy_uncore_freq(unsigned int pkg __rte_unused, + unsigned int die __rte_unused) +{ + return 0; +} + +static int +power_set_dummy_uncore_freq(unsigned int pkg __rte_unused, + unsigned int die __rte_unused, uint32_t index __rte_unused) +{ + return 0; +} + +static int +power_dummy_uncore_freq_max(unsigned int pkg __rte_unused, + unsigned int die __rte_unused) +{ + return 0; +} + +static int +power_dummy_uncore_freq_min(unsigned int pkg __rte_unused, + unsigned int die __rte_unused) +{ + return 0; +} + +static int +power_dummy_uncore_freqs(unsigned int pkg __rte_unused, unsigned int die __rte_unused, + uint32_t *freqs __rte_unused, uint32_t num __rte_unused) +{ + return 0; +} + +static int +power_dummy_uncore_get_num_freqs(unsigned int pkg __rte_unused, + unsigned int die __rte_unused) +{ + return 0; +} + +static unsigned int +power_dummy_uncore_get_num_pkgs(void) +{ + return 0; +} + +static unsigned int +power_dummy_uncore_get_num_dies(unsigned int pkg __rte_unused) +{ + return 0; +} + +/* function pointers */ +rte_power_get_uncore_freq_t rte_power_get_uncore_freq = power_get_dummy_uncore_freq; +rte_power_set_uncore_freq_t rte_power_set_uncore_freq = power_set_dummy_uncore_freq; +rte_power_uncore_freq_change_t rte_power_uncore_freq_max = power_dummy_uncore_freq_max; +rte_power_uncore_freq_change_t rte_power_uncore_freq_min = power_dummy_uncore_freq_min; +rte_power_uncore_freqs_t rte_power_uncore_freqs = power_dummy_uncore_freqs; +rte_power_uncore_get_num_freqs_t rte_power_uncore_get_num_freqs = power_dummy_uncore_get_num_freqs; +rte_power_uncore_get_num_pkgs_t rte_power_uncore_get_num_pkgs = power_dummy_uncore_get_num_pkgs; +rte_power_uncore_get_num_dies_t rte_power_uncore_get_num_dies = power_dummy_uncore_get_num_dies; + +static void +reset_power_uncore_function_ptrs(void) +{ + rte_power_get_uncore_freq = power_get_dummy_uncore_freq; + rte_power_set_uncore_freq = power_set_dummy_uncore_freq; + rte_power_uncore_freq_max = power_dummy_uncore_freq_max; + rte_power_uncore_freq_min = 
power_dummy_uncore_freq_min; + rte_power_uncore_freqs = power_dummy_uncore_freqs; + rte_power_uncore_get_num_freqs = power_dummy_uncore_get_num_freqs; + rte_power_uncore_get_num_pkgs = power_dummy_uncore_get_num_pkgs; + rte_power_uncore_get_num_dies = power_dummy_uncore_get_num_dies; +} + +int +rte_power_set_uncore_env(enum rte_uncore_power_mgmt_env env) +{ + int ret; + + rte_spinlock_lock(&global_env_cfg_lock); + + if (default_uncore_env != RTE_UNCORE_PM_ENV_NOT_SET) { + RTE_LOG(ERR, POWER, "Uncore Power Management Env already set.\n"); + rte_spinlock_unlock(&global_env_cfg_lock); + return -1; + } + + ret = 0; + if (env == RTE_UNCORE_PM_ENV_INTEL_UNCORE) { + rte_power_get_uncore_freq = power_get_intel_uncore_freq; + rte_power_set_uncore_freq = power_set_intel_uncore_freq; + rte_power_uncore_freq_min = power_intel_uncore_freq_min; + rte_power_uncore_freq_max = power_intel_uncore_freq_max; + rte_power_uncore_freqs = power_intel_uncore_freqs; + rte_power_uncore_get_num_freqs = power_intel_uncore_get_num_freqs; + rte_power_uncore_get_num_pkgs = power_intel_uncore_get_num_pkgs; + rte_power_uncore_get_num_dies = power_intel_uncore_get_num_dies; + } else { + RTE_LOG(ERR, POWER, "Invalid Power Management Environment(%d) set\n", env); + ret = -1; + goto out; + } + + default_uncore_env = env; +out: + rte_spinlock_unlock(&global_env_cfg_lock); + return ret; +} + +void +rte_power_unset_uncore_env(void) +{ + rte_spinlock_lock(&global_env_cfg_lock); + default_uncore_env = RTE_UNCORE_PM_ENV_NOT_SET; + reset_power_uncore_function_ptrs(); + rte_spinlock_unlock(&global_env_cfg_lock); +} + +enum rte_uncore_power_mgmt_env +rte_power_get_uncore_env(void) +{ + return default_uncore_env; +} + +int +rte_power_uncore_init(unsigned int pkg, unsigned int die) +{ + int ret = -1; + + switch (default_uncore_env) { + case RTE_UNCORE_PM_ENV_INTEL_UNCORE: + return power_intel_uncore_init(pkg, die); + default: + RTE_LOG(INFO, POWER, "Uncore Env isn't set yet!\n"); + break; + } + + /* Auto detect Environment */ + RTE_LOG(INFO, POWER, "Attempting to initialise Intel Uncore power mgmt...\n"); + ret = power_intel_uncore_init(pkg, die); + if (ret == 0) { + rte_power_set_uncore_env(RTE_UNCORE_PM_ENV_INTEL_UNCORE); + goto out; + } + + if (default_uncore_env == RTE_UNCORE_PM_ENV_NOT_SET) { + RTE_LOG(ERR, POWER, "Unable to set Power Management Environment " + "for package %u Die %u\n", pkg, die); + ret = 0; + } +out: + return ret; +} + +int +rte_power_uncore_exit(unsigned int pkg, unsigned int die) +{ + switch (default_uncore_env) { + case RTE_UNCORE_PM_ENV_INTEL_UNCORE: + return power_intel_uncore_exit(pkg, die); + default: + RTE_LOG(ERR, POWER, "Uncore Env has not been set, unable to exit gracefully\n"); + break; + } + return -1; +} diff --git a/lib/power/rte_power_uncore.h b/lib/power/rte_power_uncore.h new file mode 100644 index 00000000000..295017b7f42 --- /dev/null +++ b/lib/power/rte_power_uncore.h @@ -0,0 +1,265 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2022 Intel Corporation + * Copyright(c) 2023 AMD Corporation + */ + +#ifndef RTE_POWER_UNCORE_H +#define RTE_POWER_UNCORE_H + +/** + * @file + * RTE Uncore Frequency Management + */ + +#include +#include "rte_power.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Uncore Power Management Environment */ +enum rte_uncore_power_mgmt_env { + RTE_UNCORE_PM_ENV_NOT_SET, + RTE_UNCORE_PM_ENV_INTEL_UNCORE, + RTE_UNCORE_PM_ENV_AMD_HSMP +}; + +/** + * Set the default uncore power management implementation. 
If this is not called prior + * to rte_power_uncore_init(), then auto-detect of the environment will take place. + * It is thread safe. New env can be set only in uninitialized state + * (thus rte_power_unset_uncore_env must be called if different env was already set). + * + * @param env + * env. The environment in which to initialise Uncore Power Management for. + * + * @return + * - 0 on success. + * - Negative on error. + */ +__rte_experimental +int rte_power_set_uncore_env(enum rte_uncore_power_mgmt_env env); + +/** + * Unset the global uncore environment configuration. + * This can only be called after all threads have completed. + */ +__rte_experimental +void rte_power_unset_uncore_env(void); + +/** + * Get the default uncore power management implementation. + * + * @return + * power_management_env The configured environment. + */ +__rte_experimental +enum rte_uncore_power_mgmt_env rte_power_get_uncore_env(void); + +/** + * Initialize uncore frequency management for specific die on a package. + * It will get the available frequencies and prepare to set new die frequencies. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * - 0 on success. + * - Negative on error. + */ +__rte_experimental +int +rte_power_uncore_init(unsigned int pkg, unsigned int die); + +/** + * Exit uncore frequency management on a specific die on a package. + * It will restore uncore min and* max values to previous values + * before initialization of API. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * - 0 on success. + * - Negative on error. + */ +__rte_experimental +int +rte_power_uncore_exit(unsigned int pkg, unsigned int die); + +/** + * Return the current index of available frequencies of a specific die on a package. + * It should be protected outside of this function for threadsafe. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * The current index of available frequencies. + * If error, it will return 'RTE_POWER_INVALID_FREQ_INDEX = (~0)'. + */ +typedef uint32_t (*rte_power_get_uncore_freq_t)(unsigned int pkg, unsigned int die); + +extern rte_power_get_uncore_freq_t rte_power_get_uncore_freq; + +/** + * Set minimum and maximum uncore frequency for specified die on a package + * to specified index value. + * It should be protected outside of this function for threadsafe. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * @param index + * The index of available frequencies. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. 
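The same pattern applies to the uncore hooks declared below in this header: select (or auto-detect) a backend, initialise the package/die, then drive the limits through the function pointers. A short caller-side sketch (editor's illustration only; package 0, die 0 and the 32-entry buffer are arbitrary choices):

#include <rte_common.h>
#include <rte_power_uncore.h>

static int
pin_uncore_to_max(void)
{
	unsigned int pkg = 0, die = 0;	/* arbitrary for the example */
	uint32_t freqs[32];		/* arbitrary bound for the sketch */

	/* Pick the Intel backend explicitly; rte_power_uncore_init() would
	 * otherwise try to auto-detect one.
	 */
	if (rte_power_set_uncore_env(RTE_UNCORE_PM_ENV_INTEL_UNCORE) < 0)
		return -1;
	if (rte_power_uncore_init(pkg, die) < 0)
		return -1;

	if (rte_power_uncore_freqs(pkg, die, freqs, RTE_DIM(freqs)) <= 0)
		return -1;

	/* Clamp both the min and max uncore limits to the top supported value. */
	if (rte_power_uncore_freq_max(pkg, die) < 0)
		return -1;

	return rte_power_uncore_exit(pkg, die);
}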
+ */ +typedef int (*rte_power_set_uncore_freq_t)(unsigned int pkg, unsigned int die, uint32_t index); + +extern rte_power_set_uncore_freq_t rte_power_set_uncore_freq; + +/** + * Function pointer definition for generic frequency change functions. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +typedef int (*rte_power_uncore_freq_change_t)(unsigned int pkg, unsigned int die); + +/** + * Set minimum and maximum uncore frequency for specified die on a package + * to maximum value according to the available frequencies. + * It should be protected outside of this function for threadsafe. + * + * This function should NOT be called in the fast path. + */ +extern rte_power_uncore_freq_change_t rte_power_uncore_freq_max; + +/** + * Set minimum and maximum uncore frequency for specified die on a package + * to minimum value according to the available frequencies. + * It should be protected outside of this function for threadsafe. + * + * This function should NOT be called in the fast path. + */ +extern rte_power_uncore_freq_change_t rte_power_uncore_freq_min; + +/** + * Return the list of available frequencies in the index array. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * @param freqs + * The buffer array to save the frequencies. + * @param num + * The number of frequencies to get. + * + * @return + * - The number of available index's in frequency array. + * - Negative on error. + */ +typedef int (*rte_power_uncore_freqs_t)(unsigned int pkg, unsigned int die, + uint32_t *freqs, uint32_t num); + +extern rte_power_uncore_freqs_t rte_power_uncore_freqs; + +/** + * Return the list length of available frequencies in the index array. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * - The number of available index's in frequency array. + * - Negative on error. + */ +typedef int (*rte_power_uncore_get_num_freqs_t)(unsigned int pkg, unsigned int die); + +extern rte_power_uncore_get_num_freqs_t rte_power_uncore_get_num_freqs; + +/** + * Return the number of packages (CPUs) on a system + * by parsing the uncore sysfs directory. + * + * This function should NOT be called in the fast path. + * + * @return + * - Zero on error. + * - Number of package on system on success. + */ +typedef unsigned int (*rte_power_uncore_get_num_pkgs_t)(void); + +extern rte_power_uncore_get_num_pkgs_t rte_power_uncore_get_num_pkgs; + +/** + * Return the number of dies for packages (CPUs) specified + * from parsing the uncore sysfs directory. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * + * @return + * - Zero on error. + * - Number of dies for package on success.
+ */ +typedef unsigned int (*rte_power_uncore_get_num_dies_t)(unsigned int pkg); + +extern rte_power_uncore_get_num_dies_t rte_power_uncore_get_num_dies; + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_POWER_UNCORE_H */ diff --git a/lib/power/version.map b/lib/power/version.map index b8b54f768e5..53627b59f18 100644 --- a/lib/power/version.map +++ b/lib/power/version.map @@ -52,4 +52,10 @@ EXPERIMENTAL { rte_power_uncore_get_num_freqs; rte_power_uncore_get_num_pkgs; rte_power_uncore_init; + + # added in 23.11 + rte_power_get_uncore_env; + rte_power_set_uncore_env; + rte_power_uncore_freqs; + rte_power_unset_uncore_env; }; diff --git a/lib/rawdev/rte_rawdev.c b/lib/rawdev/rte_rawdev.c index dacaa60e22d..474bdc95407 100644 --- a/lib/rawdev/rte_rawdev.c +++ b/lib/rawdev/rte_rawdev.c @@ -505,8 +505,7 @@ rte_rawdev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id) uint16_t dev_id; if (rte_rawdev_pmd_get_named_dev(name) != NULL) { - RTE_RDEV_ERR("Event device with name %s already allocated!", - name); + RTE_RDEV_ERR("Raw device with name %s already allocated!", name); return NULL; } diff --git a/lib/rib/rte_rib.c b/lib/rib/rte_rib.c index 812a2597d11..486e8216dfe 100644 --- a/lib/rib/rte_rib.c +++ b/lib/rib/rte_rib.c @@ -302,7 +302,7 @@ rte_rib_insert(struct rte_rib *rib, uint32_t ip, uint8_t depth) /* closest node found, new_node should be inserted in the middle */ common_depth = RTE_MIN(depth, (*tmp)->depth); common_prefix = ip ^ (*tmp)->ip; - d = (common_prefix == 0) ? 32 : __builtin_clz(common_prefix); + d = (common_prefix == 0) ? 32 : rte_clz32(common_prefix); common_depth = RTE_MIN(d, common_depth); common_prefix = ip & rte_rib_depth_to_mask(common_depth); diff --git a/lib/rib/rte_rib6.c b/lib/rib/rte_rib6.c index ae44281ae10..94ff434978b 100644 --- a/lib/rib/rte_rib6.c +++ b/lib/rib/rte_rib6.c @@ -362,7 +362,7 @@ rte_rib6_insert(struct rte_rib6 *rib, if (ip_xor == 0) d += 8; else { - d += __builtin_clz(ip_xor << 24); + d += rte_clz32(ip_xor << 24); break; } } diff --git a/lib/ring/rte_ring_core.h b/lib/ring/rte_ring_core.h index d1e59bf9ad8..327fdcf28f9 100644 --- a/lib/ring/rte_ring_core.h +++ b/lib/ring/rte_ring_core.h @@ -126,7 +126,7 @@ struct rte_ring { uint32_t mask; /**< Mask (size-1) of ring. */ uint32_t capacity; /**< Usable size of ring */ - char pad0 __rte_cache_aligned; /**< empty cache line */ + RTE_CACHE_GUARD; /** Ring producer status. */ union { @@ -135,7 +135,7 @@ struct rte_ring { struct rte_ring_rts_headtail rts_prod; } __rte_cache_aligned; - char pad1 __rte_cache_aligned; /**< empty cache line */ + RTE_CACHE_GUARD; /** Ring consumer status. */ union { @@ -144,7 +144,7 @@ struct rte_ring { struct rte_ring_rts_headtail rts_cons; } __rte_cache_aligned; - char pad2 __rte_cache_aligned; /**< empty cache line */ + RTE_CACHE_GUARD; }; #define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". 
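The rte_ring hunk above replaces the hand-written `char padN __rte_cache_aligned` members with RTE_CACHE_GUARD, which expands to an anonymous padding cache line between the producer and consumer state. The same helper can be used in application structures; a small sketch from the editor (the field names are invented for the example):

#include <stdint.h>
#include <rte_common.h>

/* Keep two per-thread counters from sharing a cache line. */
struct stats_pair {
	uint64_t enqueued;	/* written by the producer core */
	RTE_CACHE_GUARD;	/* padding cache line(s), no manual pad member */
	uint64_t dequeued;	/* written by the consumer core */
} __rte_cache_aligned;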
*/ diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c index 751f6cf8419..1a6beb14f4a 100644 --- a/lib/sched/rte_sched.c +++ b/lib/sched/rte_sched.c @@ -973,7 +973,7 @@ rte_sched_port_config(struct rte_sched_port_params *params) port->n_max_subport_profiles = params->n_max_subport_profiles; port->n_pipes_per_subport = params->n_pipes_per_subport; port->n_pipes_per_subport_log2 = - __builtin_ctz(params->n_pipes_per_subport); + rte_ctz32(params->n_pipes_per_subport); port->socket = params->socket; for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) diff --git a/lib/security/rte_security.c b/lib/security/rte_security.c index c4d64bb8e93..b082a290296 100644 --- a/lib/security/rte_security.c +++ b/lib/security/rte_security.c @@ -27,7 +27,10 @@ } while (0) #define RTE_SECURITY_DYNFIELD_NAME "rte_security_dynfield_metadata" +#define RTE_SECURITY_OOP_DYNFIELD_NAME "rte_security_oop_dynfield_metadata" + int rte_security_dynfield_offset = -1; +int rte_security_oop_dynfield_offset = -1; int rte_security_dynfield_register(void) @@ -42,12 +45,27 @@ rte_security_dynfield_register(void) return rte_security_dynfield_offset; } +int +rte_security_oop_dynfield_register(void) +{ + static const struct rte_mbuf_dynfield dynfield_desc = { + .name = RTE_SECURITY_OOP_DYNFIELD_NAME, + .size = sizeof(rte_security_oop_dynfield_t), + .align = __alignof__(rte_security_oop_dynfield_t), + }; + + rte_security_oop_dynfield_offset = + rte_mbuf_dynfield_register(&dynfield_desc); + return rte_security_oop_dynfield_offset; +} + void * -rte_security_session_create(struct rte_security_ctx *instance, +rte_security_session_create(void *ctx, struct rte_security_session_conf *conf, struct rte_mempool *mp) { struct rte_security_session *sess = NULL; + struct rte_security_ctx *instance = ctx; uint32_t sess_priv_size; RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, session_create, NULL, NULL); @@ -76,10 +94,10 @@ rte_security_session_create(struct rte_security_ctx *instance, } int -rte_security_session_update(struct rte_security_ctx *instance, - void *sess, - struct rte_security_session_conf *conf) +rte_security_session_update(void *ctx, void *sess, struct rte_security_session_conf *conf) { + struct rte_security_ctx *instance = ctx; + RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, session_update, -EINVAL, -ENOTSUP); RTE_PTR_OR_ERR_RET(sess, -EINVAL); @@ -89,8 +107,10 @@ rte_security_session_update(struct rte_security_ctx *instance, } unsigned int -rte_security_session_get_size(struct rte_security_ctx *instance) +rte_security_session_get_size(void *ctx) { + struct rte_security_ctx *instance = ctx; + RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, session_get_size, 0, 0); return (sizeof(struct rte_security_session) + @@ -98,10 +118,10 @@ rte_security_session_get_size(struct rte_security_ctx *instance) } int -rte_security_session_stats_get(struct rte_security_ctx *instance, - void *sess, - struct rte_security_stats *stats) +rte_security_session_stats_get(void *ctx, void *sess, struct rte_security_stats *stats) { + struct rte_security_ctx *instance = ctx; + RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, session_stats_get, -EINVAL, -ENOTSUP); /* Parameter sess can be NULL in case of getting global statistics. 
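The rte_security hunks in this file change every entry point to take the security context as an opaque `void *` instead of a `struct rte_security_ctx *`, which lets the patch drop the structure from the public header further below. Caller code only loses the explicit cast; a hedged sketch (editor's illustration, assuming a valid crypto device id and an already filled session conf and mempool):

#include <rte_cryptodev.h>
#include <rte_security.h>

static void *
create_sec_session(uint8_t dev_id, struct rte_security_session_conf *conf,
		struct rte_mempool *mp)
{
	/* The context is opaque now: no cast to struct rte_security_ctx *. */
	void *sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);

	if (sec_ctx == NULL)
		return NULL;

	/* Same call as before; only the type of the first parameter changed. */
	return rte_security_session_create(sec_ctx, conf, mp);
}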
*/ @@ -111,8 +131,9 @@ rte_security_session_stats_get(struct rte_security_ctx *instance, } int -rte_security_session_destroy(struct rte_security_ctx *instance, void *sess) +rte_security_session_destroy(void *ctx, void *sess) { + struct rte_security_ctx *instance = ctx; int ret; RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, session_destroy, -EINVAL, @@ -132,9 +153,9 @@ rte_security_session_destroy(struct rte_security_ctx *instance, void *sess) } int -rte_security_macsec_sc_create(struct rte_security_ctx *instance, - struct rte_security_macsec_sc *conf) +rte_security_macsec_sc_create(void *ctx, struct rte_security_macsec_sc *conf) { + struct rte_security_ctx *instance = ctx; int sc_id; RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sc_create, -EINVAL, -ENOTSUP); @@ -148,9 +169,9 @@ rte_security_macsec_sc_create(struct rte_security_ctx *instance, } int -rte_security_macsec_sa_create(struct rte_security_ctx *instance, - struct rte_security_macsec_sa *conf) +rte_security_macsec_sa_create(void *ctx, struct rte_security_macsec_sa *conf) { + struct rte_security_ctx *instance = ctx; int sa_id; RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sa_create, -EINVAL, -ENOTSUP); @@ -164,9 +185,10 @@ rte_security_macsec_sa_create(struct rte_security_ctx *instance, } int -rte_security_macsec_sc_destroy(struct rte_security_ctx *instance, uint16_t sc_id, +rte_security_macsec_sc_destroy(void *ctx, uint16_t sc_id, enum rte_security_macsec_direction dir) { + struct rte_security_ctx *instance = ctx; int ret; RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sc_destroy, -EINVAL, -ENOTSUP); @@ -182,9 +204,10 @@ rte_security_macsec_sc_destroy(struct rte_security_ctx *instance, uint16_t sc_id } int -rte_security_macsec_sa_destroy(struct rte_security_ctx *instance, uint16_t sa_id, +rte_security_macsec_sa_destroy(void *ctx, uint16_t sa_id, enum rte_security_macsec_direction dir) { + struct rte_security_ctx *instance = ctx; int ret; RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sa_destroy, -EINVAL, -ENOTSUP); @@ -200,10 +223,12 @@ rte_security_macsec_sa_destroy(struct rte_security_ctx *instance, uint16_t sa_id } int -rte_security_macsec_sc_stats_get(struct rte_security_ctx *instance, uint16_t sc_id, +rte_security_macsec_sc_stats_get(void *ctx, uint16_t sc_id, enum rte_security_macsec_direction dir, struct rte_security_macsec_sc_stats *stats) { + struct rte_security_ctx *instance = ctx; + RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sc_stats_get, -EINVAL, -ENOTSUP); RTE_PTR_OR_ERR_RET(stats, -EINVAL); @@ -211,10 +236,12 @@ rte_security_macsec_sc_stats_get(struct rte_security_ctx *instance, uint16_t sc_ } int -rte_security_macsec_sa_stats_get(struct rte_security_ctx *instance, uint16_t sa_id, +rte_security_macsec_sa_stats_get(void *ctx, uint16_t sa_id, enum rte_security_macsec_direction dir, struct rte_security_macsec_sa_stats *stats) { + struct rte_security_ctx *instance = ctx; + RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sa_stats_get, -EINVAL, -ENOTSUP); RTE_PTR_OR_ERR_RET(stats, -EINVAL); @@ -222,10 +249,9 @@ rte_security_macsec_sa_stats_get(struct rte_security_ctx *instance, uint16_t sa_ } int -__rte_security_set_pkt_metadata(struct rte_security_ctx *instance, - void *sess, - struct rte_mbuf *m, void *params) +__rte_security_set_pkt_metadata(void *ctx, void *sess, struct rte_mbuf *m, void *params) { + struct rte_security_ctx *instance = ctx; #ifdef RTE_DEBUG RTE_PTR_OR_ERR_RET(sess, -EINVAL); RTE_PTR_OR_ERR_RET(instance, -EINVAL); @@ -238,19 +264,21 @@ __rte_security_set_pkt_metadata(struct rte_security_ctx 
*instance, } const struct rte_security_capability * -rte_security_capabilities_get(struct rte_security_ctx *instance) +rte_security_capabilities_get(void *ctx) { + struct rte_security_ctx *instance = ctx; + RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, capabilities_get, NULL, NULL); return instance->ops->capabilities_get(instance->device); } const struct rte_security_capability * -rte_security_capability_get(struct rte_security_ctx *instance, - struct rte_security_capability_idx *idx) +rte_security_capability_get(void *ctx, struct rte_security_capability_idx *idx) { const struct rte_security_capability *capabilities; const struct rte_security_capability *capability; + struct rte_security_ctx *instance = ctx; uint16_t i = 0; RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, capabilities_get, NULL, NULL); @@ -282,6 +310,14 @@ rte_security_capability_get(struct rte_security_ctx *instance, if (capability->docsis.direction == idx->docsis.direction) return capability; + } else if (idx->protocol == + RTE_SECURITY_PROTOCOL_MACSEC) { + if (idx->macsec.alg == capability->macsec.alg) + return capability; + } else if (idx->protocol == RTE_SECURITY_PROTOCOL_TLS_RECORD) { + if (capability->tls_record.ver == idx->tls_record.ver && + capability->tls_record.type == idx->tls_record.type) + return capability; } } } @@ -289,6 +325,28 @@ rte_security_capability_get(struct rte_security_ctx *instance, return NULL; } +int +rte_security_rx_inject_configure(void *ctx, uint16_t port_id, bool enable) +{ + struct rte_security_ctx *instance = ctx; + + RTE_PTR_OR_ERR_RET(instance, -EINVAL); + RTE_PTR_OR_ERR_RET(instance->ops, -ENOTSUP); + RTE_PTR_OR_ERR_RET(instance->ops->rx_inject_configure, -ENOTSUP); + + return instance->ops->rx_inject_configure(instance->device, port_id, enable); +} + +uint16_t +rte_security_inb_pkt_rx_inject(void *ctx, struct rte_mbuf **pkts, void **sess, + uint16_t nb_pkts) +{ + struct rte_security_ctx *instance = ctx; + + return instance->ops->inb_pkt_rx_inject(instance->device, pkts, + (struct rte_security_session **)sess, nb_pkts); +} + static int security_handle_cryptodev_list(const char *cmd __rte_unused, const char *params __rte_unused, @@ -380,12 +438,12 @@ static int security_capabilities_from_dev_id(int dev_id, const void **caps) { const struct rte_security_capability *capabilities; - struct rte_security_ctx *sec_ctx; + void *sec_ctx; if (rte_cryptodev_is_valid_dev(dev_id) == 0) return -EINVAL; - sec_ctx = (struct rte_security_ctx *)rte_cryptodev_get_sec_ctx(dev_id); + sec_ctx = rte_cryptodev_get_sec_ctx(dev_id); RTE_PTR_OR_ERR_RET(sec_ctx, -EINVAL); capabilities = rte_security_capabilities_get(sec_ctx); diff --git a/lib/security/rte_security.h b/lib/security/rte_security.h index 3f8abfef15e..d2d2af23d5d 100644 --- a/lib/security/rte_security.h +++ b/lib/security/rte_security.h @@ -56,30 +56,6 @@ enum rte_security_ipsec_tunnel_type { #define RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR 0x1 #define RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR 0x2 -/** - * Security context for crypto/eth devices - * - * Security instance for each driver to register security operations. - * The application can get the security context from the crypto/eth device id - * using the APIs rte_cryptodev_get_sec_ctx()/rte_eth_dev_get_sec_ctx() - * This structure is used to identify the device(crypto/eth) for which the - * security operations need to be performed. 
- */ -struct rte_security_ctx { - void *device; - /**< Crypto/ethernet device attached */ - const struct rte_security_ops *ops; - /**< Pointer to security ops for the device */ - uint16_t sess_cnt; - /**< Number of sessions attached to this context */ - uint16_t macsec_sc_cnt; - /**< Number of MACsec SC attached to this context */ - uint16_t macsec_sa_cnt; - /**< Number of MACsec SA attached to this context */ - uint32_t flags; - /**< Flags for security context */ -}; - #define RTE_SEC_CTX_F_FAST_SET_MDATA 0x00000001 /**< Driver uses fast metadata update without using driver specific callback. * For fast mdata, mbuf dynamic field would be registered by driver @@ -273,14 +249,16 @@ struct rte_security_ipsec_sa_options { */ uint32_t ip_reassembly_en : 1; - /** Reserved bit fields for future extension - * - * User should ensure reserved_opts is cleared as it may change in - * subsequent releases to support new options. + /** Enable out of place processing on inline inbound packets. * - * Note: Reduce number of bits in reserved_opts for every new option. + * * 1: Enable driver to perform Out-of-place(OOP) processing for this inline + * inbound SA if supported by driver. PMD need to register mbuf + * dynamic field using rte_security_oop_dynfield_register() + * and security session creation would fail if dynfield is not + * registered successfully. + * * 0: Disable OOP processing for this session (default). */ - uint32_t reserved_opts : 17; + uint32_t ingress_oop : 1; }; /** IPSec security association direction */ @@ -619,9 +597,137 @@ struct rte_security_docsis_xform { /**< DOCSIS direction */ }; +/** Implicit nonce length to be used with AEAD algos in TLS 1.2 */ +#define RTE_SECURITY_TLS_1_2_IMP_NONCE_LEN 4 +/** Implicit nonce length to be used with AEAD algos in TLS 1.3 */ +#define RTE_SECURITY_TLS_1_3_IMP_NONCE_LEN 12 +/** Implicit nonce length to be used with AEAD algos in DTLS 1.2 */ +#define RTE_SECURITY_DTLS_1_2_IMP_NONCE_LEN 4 + +/** TLS version */ +enum rte_security_tls_version { + RTE_SECURITY_VERSION_TLS_1_2, /**< TLS 1.2 */ + RTE_SECURITY_VERSION_TLS_1_3, /**< TLS 1.3 */ + RTE_SECURITY_VERSION_DTLS_1_2, /**< DTLS 1.2 */ +}; + +/** TLS session type */ +enum rte_security_tls_sess_type { + /** Record read session + * - Decrypt & digest verification. + */ + RTE_SECURITY_TLS_SESS_TYPE_READ, + /** Record write session + * - Encrypt & digest generation. + */ + RTE_SECURITY_TLS_SESS_TYPE_WRITE, +}; + +/** + * TLS record session options + */ +struct rte_security_tls_record_sess_options { + /** Disable IV generation in PMD. + * + * * 1: Disable IV generation in PMD. When disabled, IV provided in rte_crypto_op will be + * used by the PMD. + * + * * 0: Enable IV generation in PMD. When enabled, PMD generated random value would be used + * and application is not required to provide IV. + */ + uint32_t iv_gen_disable : 1; + /** Enable extra padding + * + * TLS allows user to pad the plain text to hide the actual size of the record. + * This is required to achieve traffic flow confidentiality in case of TLS/DTLS flows. + * This padding is in addition to the default padding performed by PMD + * (which ensures ciphertext is aligned to block size). + * + * On supported devices, application may pass the required additional padding via + * ``rte_crypto_op.aux_flags`` field. + * + * 1 : Enable extra padding of the plain text provided. The extra padding value would be + * read from ``rte_crypto_op.aux_flags``. 
+ * + * 0 : Disable extra padding + */ + uint32_t extra_padding_enable : 1; +}; + +/** + * Configure soft and hard lifetime of a TLS record session. + * + * Lifetime of a TLS record session would specify the maximum number of packets that can be + * processed. TLS record processing operations would start failing once hard limit is reached. + * + * Soft limits can be specified to generate notification when the TLS record session is approaching + * hard limits for lifetime. This would result in a warning returned in ``rte_crypto_op.aux_flags``. + */ +struct rte_security_tls_record_lifetime { + /** Soft expiry limit in number of packets */ + uint64_t packets_soft_limit; + /** Hard expiry limit in number of packets */ + uint64_t packets_hard_limit; +}; + +/** + * TLS record protocol session configuration. + * + * This structure contains data required to create a TLS record security session. + */ +struct rte_security_tls_record_xform { + /** TLS record version. */ + enum rte_security_tls_version ver; + /** TLS record session type. */ + enum rte_security_tls_sess_type type; + /** TLS record session options. */ + struct rte_security_tls_record_sess_options options; + /** TLS record session lifetime. */ + struct rte_security_tls_record_lifetime life; + union { + /** TLS 1.2 parameters. */ + struct { + /** Starting sequence number. */ + uint64_t seq_no; + /** Implicit nonce to be used for AEAD algos. */ + uint8_t imp_nonce[RTE_SECURITY_TLS_1_2_IMP_NONCE_LEN]; + } tls_1_2; + + /** TLS 1.3 parameters. */ + struct { + /** Starting sequence number. */ + uint64_t seq_no; + /** Implicit nonce to be used for AEAD algos. */ + uint8_t imp_nonce[RTE_SECURITY_TLS_1_3_IMP_NONCE_LEN]; + /** + * Minimum payload length (in case of write sessions). + * For shorter inputs, the payload would be padded appropriately + * before performing crypto transformations. + */ + uint32_t min_payload_len; + } tls_1_3; + + /** DTLS 1.2 parameters */ + struct { + /** Epoch value to be used. */ + uint16_t epoch; + /** 6B starting sequence number to be used. */ + uint64_t seq_no; + /** Implicit nonce to be used for AEAD algos. */ + uint8_t imp_nonce[RTE_SECURITY_DTLS_1_2_IMP_NONCE_LEN]; + /** + * Anti replay window size to enable sequence replay attack handling. + * Anti replay check is disabled if the window size is 0. + */ + uint32_t ar_win_sz; + } dtls_1_2; + }; +}; + /** * Security session action type. */ +/* Enumeration of rte_security_session_action_type 8<*/ enum rte_security_session_action_type { RTE_SECURITY_ACTION_TYPE_NONE, /**< No security actions */ @@ -642,8 +748,10 @@ enum rte_security_session_action_type { * protocol is processed synchronously by a CPU. */ }; +/* >8 End enumeration of rte_security_session_action_type. */ /** Security session protocol definition */ +/* Enumeration of rte_security_session_protocol 8<*/ enum rte_security_session_protocol { RTE_SECURITY_PROTOCOL_IPSEC = 1, /**< IPsec Protocol */ @@ -653,11 +761,15 @@ enum rte_security_session_protocol { /**< PDCP Protocol */ RTE_SECURITY_PROTOCOL_DOCSIS, /**< DOCSIS Protocol */ + RTE_SECURITY_PROTOCOL_TLS_RECORD, + /**< TLS Record Protocol */ }; +/* >8 End enumeration of rte_security_session_protocol. 
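Putting the new protocol to use: an application fills rte_security_tls_record_xform, selects RTE_SECURITY_PROTOCOL_TLS_RECORD in the session configuration below, and creates the session through the usual lookaside path. The sketch below is an editor's illustration only; the crypto transform, device id and mempool are placeholders, RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL and the capability-index fields are assumed from the existing rte_security conventions, and the nonce bytes are dummies:

#include <rte_cryptodev.h>
#include <rte_security.h>

static void *
create_tls12_write_session(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		struct rte_mempool *mp)
{
	void *sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);
	struct rte_security_capability_idx idx = {
		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_TLS_RECORD,
		.tls_record = {
			.ver = RTE_SECURITY_VERSION_TLS_1_2,
			.type = RTE_SECURITY_TLS_SESS_TYPE_WRITE,
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_TLS_RECORD,
		.tls_record = {
			.ver = RTE_SECURITY_VERSION_TLS_1_2,
			.type = RTE_SECURITY_TLS_SESS_TYPE_WRITE,
			.tls_1_2 = {
				.seq_no = 0,
				/* 4-byte implicit nonce, dummy bytes here */
				.imp_nonce = { 0xde, 0xad, 0xbe, 0xef },
			},
		},
		.crypto_xform = xform,
	};

	/* Bail out if the device does not advertise TLS 1.2 record offload. */
	if (sec_ctx == NULL || rte_security_capability_get(sec_ctx, &idx) == NULL)
		return NULL;

	return rte_security_session_create(sec_ctx, &conf, mp);
}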
*/ /** * Security session configuration */ +/* Structure rte_security_session_conf 8< */ struct rte_security_session_conf { enum rte_security_session_action_type action_type; /**< Type of action to be performed on the session */ @@ -668,6 +780,7 @@ struct rte_security_session_conf { struct rte_security_macsec_xform macsec; struct rte_security_pdcp_xform pdcp; struct rte_security_docsis_xform docsis; + struct rte_security_tls_record_xform tls_record; }; /**< Configuration parameters for security session */ struct rte_crypto_sym_xform *crypto_xform; @@ -675,6 +788,7 @@ struct rte_security_session_conf { void *userdata; /**< Application specific userdata to be saved with session */ }; +/* >8 End of structure rte_security_session_conf. */ /** * Create security session as specified by the session configuration @@ -687,7 +801,7 @@ struct rte_security_session_conf { * - On failure, NULL */ void * -rte_security_session_create(struct rte_security_ctx *instance, +rte_security_session_create(void *instance, struct rte_security_session_conf *conf, struct rte_mempool *mp); @@ -703,7 +817,7 @@ rte_security_session_create(struct rte_security_ctx *instance, */ __rte_experimental int -rte_security_session_update(struct rte_security_ctx *instance, +rte_security_session_update(void *instance, void *sess, struct rte_security_session_conf *conf); @@ -717,7 +831,7 @@ rte_security_session_update(struct rte_security_ctx *instance, * - 0 if device is invalid or does not support the operation. */ unsigned int -rte_security_session_get_size(struct rte_security_ctx *instance); +rte_security_session_get_size(void *instance); /** * Free security session header and the session private data and @@ -734,7 +848,7 @@ rte_security_session_get_size(struct rte_security_ctx *instance); * - other negative values in case of freeing private data errors. */ int -rte_security_session_destroy(struct rte_security_ctx *instance, void *sess); +rte_security_session_destroy(void *instance, void *sess); /** * @warning @@ -753,7 +867,7 @@ rte_security_session_destroy(struct rte_security_ctx *instance, void *sess); */ __rte_experimental int -rte_security_macsec_sc_create(struct rte_security_ctx *instance, +rte_security_macsec_sc_create(void *instance, struct rte_security_macsec_sc *conf); /** @@ -772,7 +886,7 @@ rte_security_macsec_sc_create(struct rte_security_ctx *instance, */ __rte_experimental int -rte_security_macsec_sc_destroy(struct rte_security_ctx *instance, uint16_t sc_id, +rte_security_macsec_sc_destroy(void *instance, uint16_t sc_id, enum rte_security_macsec_direction dir); /** @@ -792,7 +906,7 @@ rte_security_macsec_sc_destroy(struct rte_security_ctx *instance, uint16_t sc_id */ __rte_experimental int -rte_security_macsec_sa_create(struct rte_security_ctx *instance, +rte_security_macsec_sa_create(void *instance, struct rte_security_macsec_sa *conf); /** @@ -811,7 +925,7 @@ rte_security_macsec_sa_create(struct rte_security_ctx *instance, */ __rte_experimental int -rte_security_macsec_sa_destroy(struct rte_security_ctx *instance, uint16_t sa_id, +rte_security_macsec_sa_destroy(void *instance, uint16_t sa_id, enum rte_security_macsec_direction dir); /** Device-specific metadata field type */ @@ -819,6 +933,13 @@ typedef uint64_t rte_security_dynfield_t; /** Dynamic mbuf field for device-specific metadata */ extern int rte_security_dynfield_offset; +/** Out-of-Place(OOP) processing field type */ +typedef struct rte_mbuf *rte_security_oop_dynfield_t; +/** Dynamic mbuf field for pointer to original mbuf for + * OOP processing session. 
+ */ +extern int rte_security_oop_dynfield_offset; + /** * @warning * @b EXPERIMENTAL: this API may change without prior notice @@ -841,6 +962,25 @@ rte_security_dynfield(struct rte_mbuf *mbuf) rte_security_dynfield_t *); } +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Get pointer to mbuf field for original mbuf pointer when + * Out-Of-Place(OOP) processing is enabled in security session. + * + * @param mbuf packet to access + * @return pointer to mbuf field + */ +__rte_experimental +static inline rte_security_oop_dynfield_t * +rte_security_oop_dynfield(struct rte_mbuf *mbuf) +{ + return RTE_MBUF_DYNFIELD(mbuf, + rte_security_oop_dynfield_offset, + rte_security_oop_dynfield_t *); +} + /** * @warning * @b EXPERIMENTAL: this API may change without prior notice @@ -855,6 +995,27 @@ static inline bool rte_security_dynfield_is_registered(void) return rte_security_dynfield_offset >= 0; } +#define RTE_SECURITY_CTX_FLAGS_OFF 4 +/** + * Get security flags from security instance. + */ +static inline uint32_t +rte_security_ctx_flags_get(void *ctx) +{ + return *((uint32_t *)ctx + RTE_SECURITY_CTX_FLAGS_OFF); +} + +/** + * Set security flags in security instance. + */ +static inline void +rte_security_ctx_flags_set(void *ctx, uint32_t flags) +{ + uint32_t *data; + data = (((uint32_t *)ctx) + RTE_SECURITY_CTX_FLAGS_OFF); + *data = flags; +} + #define RTE_SECURITY_SESS_OPAQUE_DATA_OFF 0 #define RTE_SECURITY_SESS_FAST_MDATA_OFF 1 /** @@ -899,7 +1060,7 @@ rte_security_session_fast_mdata_set(void *sess, uint64_t fdata) /** Function to call PMD specific function pointer set_pkt_metadata() */ __rte_experimental -int __rte_security_set_pkt_metadata(struct rte_security_ctx *instance, +int __rte_security_set_pkt_metadata(void *instance, void *sess, struct rte_mbuf *m, void *params); @@ -917,12 +1078,12 @@ int __rte_security_set_pkt_metadata(struct rte_security_ctx *instance, * - On failure, a negative value. */ static inline int -rte_security_set_pkt_metadata(struct rte_security_ctx *instance, +rte_security_set_pkt_metadata(void *instance, void *sess, struct rte_mbuf *mb, void *params) { /* Fast Path */ - if (instance->flags & RTE_SEC_CTX_F_FAST_SET_MDATA) { + if (rte_security_ctx_flags_get(instance) & RTE_SEC_CTX_F_FAST_SET_MDATA) { *rte_security_dynfield(mb) = (rte_security_dynfield_t) rte_security_session_fast_mdata_get(sess); return 0; @@ -1071,7 +1232,7 @@ struct rte_security_stats { */ __rte_experimental int -rte_security_session_stats_get(struct rte_security_ctx *instance, +rte_security_session_stats_get(void *instance, void *sess, struct rte_security_stats *stats); @@ -1091,7 +1252,7 @@ rte_security_session_stats_get(struct rte_security_ctx *instance, */ __rte_experimental int -rte_security_macsec_sa_stats_get(struct rte_security_ctx *instance, +rte_security_macsec_sa_stats_get(void *instance, uint16_t sa_id, enum rte_security_macsec_direction dir, struct rte_security_macsec_sa_stats *stats); @@ -1111,7 +1272,7 @@ rte_security_macsec_sa_stats_get(struct rte_security_ctx *instance, */ __rte_experimental int -rte_security_macsec_sc_stats_get(struct rte_security_ctx *instance, +rte_security_macsec_sc_stats_get(void *instance, uint16_t sc_id, enum rte_security_macsec_direction dir, struct rte_security_macsec_sc_stats *stats); @@ -1186,6 +1347,17 @@ struct rte_security_capability { /**< DOCSIS direction */ } docsis; /**< DOCSIS capability */ + struct { + enum rte_security_tls_version ver; + /**< TLS record version. 
*/ + enum rte_security_tls_sess_type type; + /**< TLS record session type. */ + uint32_t ar_win_size; + /**< Maximum anti replay window size supported for DTLS 1.2 record read + * operation. Value of 0 means anti replay check is not supported. + */ + } tls_record; + /**< TLS record capability */ }; const struct rte_cryptodev_capabilities *crypto_capabilities; @@ -1246,6 +1418,13 @@ struct rte_security_capability_idx { struct { enum rte_security_docsis_direction direction; } docsis; + struct { + enum rte_security_macsec_alg alg; + } macsec; + struct { + enum rte_security_tls_version ver; + enum rte_security_tls_sess_type type; + } tls_record; }; }; @@ -1259,7 +1438,7 @@ struct rte_security_capability_idx { * - Return NULL if no capabilities available. */ const struct rte_security_capability * -rte_security_capabilities_get(struct rte_security_ctx *instance); +rte_security_capabilities_get(void *instance); /** * Query if a specific capability is available on security instance @@ -1273,9 +1452,93 @@ rte_security_capabilities_get(struct rte_security_ctx *instance); * - Return NULL if the capability not matched on security instance. */ const struct rte_security_capability * -rte_security_capability_get(struct rte_security_ctx *instance, +rte_security_capability_get(void *instance, struct rte_security_capability_idx *idx); +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Configure security device to inject packets to an ethdev port. + * + * This API must be called only when both the security device and the ethdev are in + * the stopped state. The security device needs to be configured before any packets + * are submitted to the ``rte_security_inb_pkt_rx_inject`` API. + * + * @param ctx Security ctx + * @param port_id Port identifier of the ethernet device to which + * packets need to be injected. + * @param enable Flag to enable and disable connection between a + * security device and an ethdev port. + * @return + * - 0 if successful. + * - -EINVAL if context is NULL or port_id is invalid. + * - -EBUSY if devices are not in the stopped state. + * - -ENOTSUP if the security device does not support injecting to the ethdev port. + * + * @see rte_security_inb_pkt_rx_inject + */ +__rte_experimental +int +rte_security_rx_inject_configure(void *ctx, uint16_t port_id, bool enable); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Perform security processing of packets and inject the processed packet to + * ethdev Rx. + * + * Rx inject would behave similarly to ethdev loopback but with the additional + * security processing. In case of ethdev loopback, application would be + * submitting packets to ethdev Tx queues and would be received as is from + * ethdev Rx queues. With Rx inject, packets would be received after security + * processing from ethdev Rx queues. + * + * With inline protocol offload capable ethdevs, Rx injection can be used to + * handle packets which failed the regular security Rx path. This can be due to + * cases such as outer fragmentation, in which case applications can reassemble + * the fragments and then subsequently submit for inbound processing and Rx + * injection, so that packets are received as regular security processed + * packets. + * + * With lookaside protocol offload capable cryptodevs, Rx injection can be used + * to perform packet parsing after security processing. This would allow for + * re-classification after security protocol processing is done (i.e., inner + * packet parsing).
The ethdev queue on which the packet would be received would + * be based on rte_flow rules matching the packet after security processing. + * + * The security device which is injecting packets to ethdev Rx needs to be + * configured using ``rte_security_rx_inject_configure`` with the enable flag set + * to `true` before any packets are submitted. + * + * If the `hash.fdir.h` field is set in the mbuf, it would be treated as the value for + * `MARK` pattern for the subsequent rte_flow parsing. The packet would appear + * as if it is received from the `port` field in the mbuf. + * + * Since the packet would be received back from ethdev Rx queues, + * it is expected that the application retains/adds the L2 header, with the + * mbuf field 'l2_len' reflecting the size of the L2 header in the packet. + * + * @param ctx Security ctx + * @param pkts The address of an array of *nb_pkts* pointers to + * *rte_mbuf* structures which contain the packets. + * @param sess The address of an array of *nb_pkts* pointers to + * security sessions corresponding to each packet. + * @param nb_pkts The maximum number of packets to process. + * + * @return + * The number of packets successfully injected to ethdev Rx. + * The return value can be less than the value of the *nb_pkts* parameter + * when the PMD internal queues have been filled up. + * + * @see rte_security_rx_inject_configure + */ +__rte_experimental +uint16_t +rte_security_inb_pkt_rx_inject(void *ctx, struct rte_mbuf **pkts, void **sess, + uint16_t nb_pkts); + #ifdef __cplusplus } #endif diff --git a/lib/security/rte_security_driver.h b/lib/security/rte_security_driver.h index 31444a05d3e..62664dacdbb 100644 --- a/lib/security/rte_security_driver.h +++ b/lib/security/rte_security_driver.h @@ -37,6 +37,30 @@ struct rte_security_session { /**< Private session material, variable size (depends on driver) */ }; +/** + * Security context for crypto/eth devices + * + * Security instance for each driver to register security operations. + * The application can get the security context from the crypto/eth device id + * using the APIs rte_cryptodev_get_sec_ctx()/rte_eth_dev_get_sec_ctx(). + * This structure is used to identify the device (crypto/eth) for which the + * security operations need to be performed. + */ +struct rte_security_ctx { + void *device; + /**< Crypto/ethernet device attached */ + const struct rte_security_ops *ops; + /**< Pointer to security ops for the device */ + uint32_t flags; + /**< Flags for security context */ + uint16_t sess_cnt; + /**< Number of sessions attached to this context */ + uint16_t macsec_sc_cnt; + /**< Number of MACsec SC attached to this context */ + uint16_t macsec_sa_cnt; + /**< Number of MACsec SA attached to this context */ +}; + /** * Helper macro to get driver private data */ @@ -197,6 +221,14 @@ typedef int (*security_macsec_sa_stats_get_t)(void *device, uint16_t sa_id, __rte_internal int rte_security_dynfield_register(void); +/** + * @internal + * Register the mbuf dynamic field for security inline ingress Out-of-Place (OOP) + * processing. + */ +__rte_internal +int rte_security_oop_dynfield_register(void); + /** * Update the mbuf with provided metadata. * @@ -225,6 +257,46 @@ typedef int (*security_set_pkt_metadata_t)(void *device, typedef const struct rte_security_capability *(*security_capabilities_get_t)( void *device); +/** + * Configure security device to inject packets to an ethdev port. + * + * @param device Crypto/eth device pointer + * @param port_id Port identifier of the ethernet device to which packets need to be + * injected.
+ * @param enable Flag to enable and disable connection between a security device and + * an ethdev port. + * @return + * - 0 if successful. + * - -EINVAL if context is NULL or port_id is invalid. + * - -EBUSY if devices are not in the stopped state. + * - -ENOTSUP if the security device does not support injecting to the ethdev port. + */ +typedef int (*security_rx_inject_configure)(void *device, uint16_t port_id, bool enable); + +/** + * Perform security processing of packets and inject the processed packet to + * ethdev Rx. + * + * Rx inject would behave similarly to ethdev loopback but with the additional + * security processing. + * + * @param device Crypto/eth device pointer + * @param pkts The address of an array of *nb_pkts* pointers to + * *rte_mbuf* structures which contain the packets. + * @param sess The address of an array of *nb_pkts* pointers to + * *rte_security_session* structures corresponding + * to each packet. + * @param nb_pkts The maximum number of packets to process. + * + * @return + * The number of packets successfully injected to ethdev Rx. The return + * value can be less than the value of the *nb_pkts* parameter when the + * PMD internal queues have been filled up. + */ +typedef uint16_t (*security_inb_pkt_rx_inject)(void *device, + struct rte_mbuf **pkts, struct rte_security_session **sess, + uint16_t nb_pkts); + /** Security operations function pointer table */ struct rte_security_ops { security_session_create_t session_create; @@ -253,6 +325,10 @@ struct rte_security_ops { /**< Get MACsec SC statistics. */ security_macsec_sa_stats_get_t macsec_sa_stats_get; /**< Get MACsec SA statistics. */ + security_rx_inject_configure rx_inject_configure; + /**< Rx inject configure. */ + security_inb_pkt_rx_inject inb_pkt_rx_inject; + /**< Perform security processing and do Rx inject.
*/ }; #ifdef __cplusplus diff --git a/lib/security/version.map b/lib/security/version.map index b2097a969d1..e07fca33a1e 100644 --- a/lib/security/version.map +++ b/lib/security/version.map @@ -23,10 +23,15 @@ EXPERIMENTAL { rte_security_macsec_sc_stats_get; rte_security_session_stats_get; rte_security_session_update; + rte_security_oop_dynfield_offset; + + rte_security_rx_inject_configure; + rte_security_inb_pkt_rx_inject; }; INTERNAL { global: rte_security_dynfield_register; + rte_security_oop_dynfield_register; }; diff --git a/lib/table/rte_lru_x86.h b/lib/table/rte_lru_x86.h index 38476d956ee..ddfb8c1c8cf 100644 --- a/lib/table/rte_lru_x86.h +++ b/lib/table/rte_lru_x86.h @@ -20,7 +20,7 @@ extern "C" { #if RTE_TABLE_HASH_LRU_STRATEGY == 2 -#if RTE_CC_IS_GNU && (GCC_VERSION > 40306) +#if RTE_CC_IS_GNU #include #else #include @@ -64,7 +64,7 @@ do { \ #elif RTE_TABLE_HASH_LRU_STRATEGY == 3 -#if RTE_CC_IS_GNU && (GCC_VERSION > 40306) +#if RTE_CC_IS_GNU #include #else #include diff --git a/lib/table/rte_swx_table_learner.c b/lib/table/rte_swx_table_learner.c index 996fd3de5b7..2b5e6bdce1d 100644 --- a/lib/table/rte_swx_table_learner.c +++ b/lib/table/rte_swx_table_learner.c @@ -202,7 +202,7 @@ table_params_get(struct table_params *p, struct rte_swx_table_learner_params *pa p->key_size_pow2 = rte_align64pow2(p->key_size); - p->key_size_log2 = __builtin_ctzll(p->key_size_pow2); + p->key_size_log2 = rte_ctz64(p->key_size_pow2); p->key_offset = params->key_offset; @@ -211,7 +211,7 @@ table_params_get(struct table_params *p, struct rte_swx_table_learner_params *pa p->data_size_pow2 = rte_align64pow2(sizeof(uint64_t) + p->action_data_size); - p->data_size_log2 = __builtin_ctzll(p->data_size_pow2); + p->data_size_log2 = rte_ctz64(p->data_size_pow2); /* Buckets. */ p->n_buckets = rte_align32pow2(params->n_keys_max); @@ -224,7 +224,7 @@ table_params_get(struct table_params *p, struct rte_swx_table_learner_params *pa p->bucket_key_all_size + TABLE_KEYS_PER_BUCKET * p->data_size_pow2); - p->bucket_size_log2 = __builtin_ctzll(p->bucket_size); + p->bucket_size_log2 = rte_ctz64(p->bucket_size); p->hash_func = params->hash_func ? 
params->hash_func : rte_hash_crc; diff --git a/lib/table/rte_table_acl.c b/lib/table/rte_table_acl.c index 53fd5c66adb..902cb78eac5 100644 --- a/lib/table/rte_table_acl.c +++ b/lib/table/rte_table_acl.c @@ -719,12 +719,12 @@ rte_table_acl_lookup( uint64_t pkts_out_mask; uint32_t n_pkts, i, j; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_ACL_STATS_PKTS_IN_ADD(acl, n_pkts_in); /* Input conversion */ for (i = 0, j = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX - - __builtin_clzll(pkts_mask)); i++) { + rte_clz64(pkts_mask)); i++) { uint64_t pkt_mask = 1LLU << i; if (pkt_mask & pkts_mask) { @@ -744,7 +744,7 @@ rte_table_acl_lookup( pkts_out_mask = 0; for (i = 0; i < n_pkts; i++) { uint32_t action_table_pos = results[i]; - uint32_t pkt_pos = __builtin_ctzll(pkts_mask); + uint32_t pkt_pos = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_pos; pkts_mask &= ~pkt_mask; @@ -759,7 +759,7 @@ rte_table_acl_lookup( } *lookup_hit_mask = pkts_out_mask; - RTE_TABLE_ACL_STATS_PKTS_LOOKUP_MISS(acl, n_pkts_in - __builtin_popcountll(pkts_out_mask)); + RTE_TABLE_ACL_STATS_PKTS_LOOKUP_MISS(acl, n_pkts_in - rte_popcount64(pkts_out_mask)); return 0; } diff --git a/lib/table/rte_table_array.c b/lib/table/rte_table_array.c index 54a0c42f7dd..a45b29ed6a0 100644 --- a/lib/table/rte_table_array.c +++ b/lib/table/rte_table_array.c @@ -146,12 +146,12 @@ rte_table_array_lookup( void **entries) { struct rte_table_array *t = (struct rte_table_array *) table; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_ARRAY_STATS_PKTS_IN_ADD(t, n_pkts_in); *lookup_hit_mask = pkts_mask; if ((pkts_mask & (pkts_mask + 1)) == 0) { - uint64_t n_pkts = __builtin_popcountll(pkts_mask); + uint64_t n_pkts = rte_popcount64(pkts_mask); uint32_t i; for (i = 0; i < n_pkts; i++) { @@ -164,7 +164,7 @@ rte_table_array_lookup( } } else { for ( ; pkts_mask; ) { - uint32_t pkt_index = __builtin_ctzll(pkts_mask); + uint32_t pkt_index = rte_ctz64(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; uint32_t entry_pos = RTE_MBUF_METADATA_UINT32(pkt, diff --git a/lib/table/rte_table_hash_cuckoo.c b/lib/table/rte_table_hash_cuckoo.c index c77eccf5272..86c960c1034 100644 --- a/lib/table/rte_table_hash_cuckoo.c +++ b/lib/table/rte_table_hash_cuckoo.c @@ -237,7 +237,7 @@ rte_table_hash_cuckoo_lookup(void *table, uint64_t pkts_mask_out = 0; uint32_t i; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_HASH_CUCKOO_STATS_PKTS_IN_ADD(t, n_pkts_in); @@ -268,7 +268,7 @@ rte_table_hash_cuckoo_lookup(void *table, } } else for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX - - __builtin_clzll(pkts_mask)); i++) { + - rte_clz64(pkts_mask)); i++) { uint64_t pkt_mask = 1LLU << i; if (pkt_mask & pkts_mask) { @@ -288,7 +288,7 @@ rte_table_hash_cuckoo_lookup(void *table, *lookup_hit_mask = pkts_mask_out; RTE_TABLE_HASH_CUCKOO_STATS_PKTS_LOOKUP_MISS(t, - n_pkts_in - __builtin_popcountll(pkts_mask_out)); + n_pkts_in - rte_popcount64(pkts_mask_out)); return 0; diff --git a/lib/table/rte_table_hash_ext.c b/lib/table/rte_table_hash_ext.c index 4753ccb15c5..51a20acbd77 100644 --- a/lib/table/rte_table_hash_ext.c +++ b/lib/table/rte_table_hash_ext.c @@ -469,7 +469,7 @@ static int rte_table_hash_ext_lookup_unoptimized( struct rte_table_hash *t = (struct 
rte_table_hash *) table; uint64_t pkts_mask_out = 0; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); for ( ; pkts_mask; ) { struct bucket *bkt0, *bkt; @@ -478,7 +478,7 @@ static int rte_table_hash_ext_lookup_unoptimized( uint64_t pkt_mask, sig; uint32_t pkt_index, bkt_index, i; - pkt_index = __builtin_ctzll(pkts_mask); + pkt_index = rte_ctz64(pkts_mask); pkt_mask = 1LLU << pkt_index; pkts_mask &= ~pkt_mask; @@ -669,12 +669,12 @@ static int rte_table_hash_ext_lookup_unoptimized( struct rte_mbuf *mbuf00, *mbuf01; \ uint32_t key_offset = t->key_offset; \ \ - pkt00_index = __builtin_ctzll(pkts_mask); \ + pkt00_index = rte_ctz64(pkts_mask); \ pkt00_mask = 1LLU << pkt00_index; \ pkts_mask &= ~pkt00_mask; \ mbuf00 = pkts[pkt00_index]; \ \ - pkt01_index = __builtin_ctzll(pkts_mask); \ + pkt01_index = rte_ctz64(pkts_mask); \ pkt01_mask = 1LLU << pkt01_index; \ pkts_mask &= ~pkt01_mask; \ mbuf01 = pkts[pkt01_index]; \ @@ -690,12 +690,12 @@ static int rte_table_hash_ext_lookup_unoptimized( struct rte_mbuf *mbuf00, *mbuf01; \ uint32_t key_offset = t->key_offset; \ \ - pkt00_index = __builtin_ctzll(pkts_mask); \ + pkt00_index = rte_ctz64(pkts_mask); \ pkt00_mask = 1LLU << pkt00_index; \ pkts_mask &= ~pkt00_mask; \ mbuf00 = pkts[pkt00_index]; \ \ - pkt01_index = __builtin_ctzll(pkts_mask); \ + pkt01_index = rte_ctz64(pkts_mask); \ if (pkts_mask == 0) \ pkt01_index = pkt00_index; \ pkt01_mask = 1LLU << pkt01_index; \ @@ -857,15 +857,15 @@ static int rte_table_hash_ext_lookup( uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0; int status = 0; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(t, n_pkts_in); /* Cannot run the pipeline with less than 7 packets */ - if (__builtin_popcountll(pkts_mask) < 7) { + if (rte_popcount64(pkts_mask) < 7) { status = rte_table_hash_ext_lookup_unoptimized(table, pkts, pkts_mask, lookup_hit_mask, entries); RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - - __builtin_popcountll(*lookup_hit_mask)); + rte_popcount64(*lookup_hit_mask)); return status; } @@ -976,7 +976,7 @@ static int rte_table_hash_ext_lookup( } *lookup_hit_mask = pkts_mask_out; - RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - __builtin_popcountll(pkts_mask_out)); + RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - rte_popcount64(pkts_mask_out)); return status; } diff --git a/lib/table/rte_table_hash_key16.c b/lib/table/rte_table_hash_key16.c index 04d7fd64bd9..584c3f2c98a 100644 --- a/lib/table/rte_table_hash_key16.c +++ b/lib/table/rte_table_hash_key16.c @@ -636,7 +636,7 @@ rte_table_hash_entry_delete_key16_ext( uint64_t pkt_mask; \ uint32_t key_offset = f->key_offset;\ \ - pkt0_index = __builtin_ctzll(pkts_mask); \ + pkt0_index = rte_ctz64(pkts_mask); \ pkt_mask = 1LLU << pkt0_index; \ pkts_mask &= ~pkt_mask; \ \ @@ -741,14 +741,14 @@ rte_table_hash_entry_delete_key16_ext( uint64_t pkt00_mask, pkt01_mask; \ uint32_t key_offset = f->key_offset; \ \ - pkt00_index = __builtin_ctzll(pkts_mask); \ + pkt00_index = rte_ctz64(pkts_mask); \ pkt00_mask = 1LLU << pkt00_index; \ pkts_mask &= ~pkt00_mask; \ \ mbuf00 = pkts[pkt00_index]; \ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\ \ - pkt01_index = __builtin_ctzll(pkts_mask); \ + pkt01_index = rte_ctz64(pkts_mask); \ pkt01_mask = 1LLU << pkt01_index; \ pkts_mask &= ~pkt01_mask; \ \ @@ -762,14 +762,14 @@ 
rte_table_hash_entry_delete_key16_ext( uint64_t pkt00_mask, pkt01_mask; \ uint32_t key_offset = f->key_offset; \ \ - pkt00_index = __builtin_ctzll(pkts_mask); \ + pkt00_index = rte_ctz64(pkts_mask); \ pkt00_mask = 1LLU << pkt00_index; \ pkts_mask &= ~pkt00_mask; \ \ mbuf00 = pkts[pkt00_index]; \ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \ \ - pkt01_index = __builtin_ctzll(pkts_mask); \ + pkt01_index = rte_ctz64(pkts_mask); \ if (pkts_mask == 0) \ pkt01_index = pkt00_index; \ pkt01_mask = 1LLU << pkt01_index; \ @@ -882,12 +882,12 @@ rte_table_hash_lookup_key16_lru( uint32_t pkt11_index, pkt20_index, pkt21_index; uint64_t pkts_mask_out = 0; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in); /* Cannot run the pipeline with less than 5 packets */ - if (__builtin_popcountll(pkts_mask) < 5) { + if (rte_popcount64(pkts_mask) < 5) { for ( ; pkts_mask; ) { struct rte_bucket_4_16 *bucket; struct rte_mbuf *mbuf; @@ -901,7 +901,7 @@ rte_table_hash_lookup_key16_lru( *lookup_hit_mask = pkts_mask_out; RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - - __builtin_popcountll(pkts_mask_out)); + rte_popcount64(pkts_mask_out)); return 0; } @@ -992,7 +992,7 @@ rte_table_hash_lookup_key16_lru( *lookup_hit_mask = pkts_mask_out; RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - - __builtin_popcountll(pkts_mask_out)); + rte_popcount64(pkts_mask_out)); return 0; } /* lookup LRU */ @@ -1013,12 +1013,12 @@ rte_table_hash_lookup_key16_ext( struct rte_bucket_4_16 *buckets[RTE_PORT_IN_BURST_SIZE_MAX]; uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX]; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in); /* Cannot run the pipeline with less than 5 packets */ - if (__builtin_popcountll(pkts_mask) < 5) { + if (rte_popcount64(pkts_mask) < 5) { for ( ; pkts_mask; ) { struct rte_bucket_4_16 *bucket; struct rte_mbuf *mbuf; @@ -1131,7 +1131,7 @@ rte_table_hash_lookup_key16_ext( uint64_t pkt_mask; uint32_t pkt_index; - pkt_index = __builtin_ctzll(buckets_mask); + pkt_index = rte_ctz64(buckets_mask); pkt_mask = 1LLU << pkt_index; buckets_mask &= ~pkt_mask; @@ -1144,7 +1144,7 @@ rte_table_hash_lookup_key16_ext( *lookup_hit_mask = pkts_mask_out; RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - - __builtin_popcountll(pkts_mask_out)); + rte_popcount64(pkts_mask_out)); return 0; } /* lookup EXT */ diff --git a/lib/table/rte_table_hash_key32.c b/lib/table/rte_table_hash_key32.c index 88d8f69c72e..22b5ca9166d 100644 --- a/lib/table/rte_table_hash_key32.c +++ b/lib/table/rte_table_hash_key32.c @@ -664,7 +664,7 @@ rte_table_hash_entry_delete_key32_ext( uint64_t pkt_mask; \ uint32_t key_offset = f->key_offset; \ \ - pkt0_index = __builtin_ctzll(pkts_mask); \ + pkt0_index = rte_ctz64(pkts_mask); \ pkt_mask = 1LLU << pkt0_index; \ pkts_mask &= ~pkt_mask; \ \ @@ -773,14 +773,14 @@ rte_table_hash_entry_delete_key32_ext( uint64_t pkt00_mask, pkt01_mask; \ uint32_t key_offset = f->key_offset; \ \ - pkt00_index = __builtin_ctzll(pkts_mask); \ + pkt00_index = rte_ctz64(pkts_mask); \ pkt00_mask = 1LLU << pkt00_index; \ pkts_mask &= ~pkt00_mask; \ \ mbuf00 = pkts[pkt00_index]; \ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\ \ - pkt01_index = __builtin_ctzll(pkts_mask); \ + pkt01_index = rte_ctz64(pkts_mask); \ pkt01_mask = 
1LLU << pkt01_index; \ pkts_mask &= ~pkt01_mask; \ \ @@ -794,14 +794,14 @@ rte_table_hash_entry_delete_key32_ext( uint64_t pkt00_mask, pkt01_mask; \ uint32_t key_offset = f->key_offset; \ \ - pkt00_index = __builtin_ctzll(pkts_mask); \ + pkt00_index = rte_ctz64(pkts_mask); \ pkt00_mask = 1LLU << pkt00_index; \ pkts_mask &= ~pkt00_mask; \ \ mbuf00 = pkts[pkt00_index]; \ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \ \ - pkt01_index = __builtin_ctzll(pkts_mask); \ + pkt01_index = rte_ctz64(pkts_mask); \ if (pkts_mask == 0) \ pkt01_index = pkt00_index; \ \ @@ -919,11 +919,11 @@ rte_table_hash_lookup_key32_lru( uint32_t pkt11_index, pkt20_index, pkt21_index; uint64_t pkts_mask_out = 0; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in); /* Cannot run the pipeline with less than 5 packets */ - if (__builtin_popcountll(pkts_mask) < 5) { + if (rte_popcount64(pkts_mask) < 5) { for ( ; pkts_mask; ) { struct rte_bucket_4_32 *bucket; struct rte_mbuf *mbuf; @@ -936,7 +936,7 @@ rte_table_hash_lookup_key32_lru( } *lookup_hit_mask = pkts_mask_out; - RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out)); + RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - rte_popcount64(pkts_mask_out)); return 0; } @@ -1027,7 +1027,7 @@ rte_table_hash_lookup_key32_lru( mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f); *lookup_hit_mask = pkts_mask_out; - RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out)); + RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - rte_popcount64(pkts_mask_out)); return 0; } /* rte_table_hash_lookup_key32_lru() */ @@ -1048,11 +1048,11 @@ rte_table_hash_lookup_key32_ext( struct rte_bucket_4_32 *buckets[RTE_PORT_IN_BURST_SIZE_MAX]; uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX]; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in); /* Cannot run the pipeline with less than 5 packets */ - if (__builtin_popcountll(pkts_mask) < 5) { + if (rte_popcount64(pkts_mask) < 5) { for ( ; pkts_mask; ) { struct rte_bucket_4_32 *bucket; struct rte_mbuf *mbuf; @@ -1165,7 +1165,7 @@ rte_table_hash_lookup_key32_ext( uint64_t pkt_mask; uint32_t pkt_index; - pkt_index = __builtin_ctzll(buckets_mask); + pkt_index = rte_ctz64(buckets_mask); pkt_mask = 1LLU << pkt_index; buckets_mask &= ~pkt_mask; @@ -1177,7 +1177,7 @@ rte_table_hash_lookup_key32_ext( } *lookup_hit_mask = pkts_mask_out; - RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out)); + RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - rte_popcount64(pkts_mask_out)); return 0; } /* rte_table_hash_lookup_key32_ext() */ diff --git a/lib/table/rte_table_hash_key8.c b/lib/table/rte_table_hash_key8.c index 035d2427694..bd0ec4aac03 100644 --- a/lib/table/rte_table_hash_key8.c +++ b/lib/table/rte_table_hash_key8.c @@ -608,7 +608,7 @@ rte_table_hash_entry_delete_key8_ext( uint64_t pkt_mask; \ uint32_t key_offset = f->key_offset;\ \ - pkt0_index = __builtin_ctzll(pkts_mask); \ + pkt0_index = rte_ctz64(pkts_mask); \ pkt_mask = 1LLU << pkt0_index; \ pkts_mask &= ~pkt_mask; \ \ @@ -710,14 +710,14 @@ rte_table_hash_entry_delete_key8_ext( uint64_t pkt00_mask, pkt01_mask; \ uint32_t key_offset = f->key_offset; \ \ - pkt00_index = 
__builtin_ctzll(pkts_mask); \ + pkt00_index = rte_ctz64(pkts_mask); \ pkt00_mask = 1LLU << pkt00_index; \ pkts_mask &= ~pkt00_mask; \ \ mbuf00 = pkts[pkt00_index]; \ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\ \ - pkt01_index = __builtin_ctzll(pkts_mask); \ + pkt01_index = rte_ctz64(pkts_mask); \ pkt01_mask = 1LLU << pkt01_index; \ pkts_mask &= ~pkt01_mask; \ \ @@ -731,14 +731,14 @@ rte_table_hash_entry_delete_key8_ext( uint64_t pkt00_mask, pkt01_mask; \ uint32_t key_offset = f->key_offset; \ \ - pkt00_index = __builtin_ctzll(pkts_mask); \ + pkt00_index = rte_ctz64(pkts_mask); \ pkt00_mask = 1LLU << pkt00_index; \ pkts_mask &= ~pkt00_mask; \ \ mbuf00 = pkts[pkt00_index]; \ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\ \ - pkt01_index = __builtin_ctzll(pkts_mask); \ + pkt01_index = rte_ctz64(pkts_mask); \ if (pkts_mask == 0) \ pkt01_index = pkt00_index; \ \ @@ -854,11 +854,11 @@ rte_table_hash_lookup_key8_lru( uint32_t pkt11_index, pkt20_index, pkt21_index; uint64_t pkts_mask_out = 0; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_HASH_KEY8_STATS_PKTS_IN_ADD(f, n_pkts_in); /* Cannot run the pipeline with less than 5 packets */ - if (__builtin_popcountll(pkts_mask) < 5) { + if (rte_popcount64(pkts_mask) < 5) { for ( ; pkts_mask; ) { struct rte_bucket_4_8 *bucket; struct rte_mbuf *mbuf; @@ -871,7 +871,7 @@ rte_table_hash_lookup_key8_lru( } *lookup_hit_mask = pkts_mask_out; - RTE_TABLE_HASH_KEY8_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out)); + RTE_TABLE_HASH_KEY8_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - rte_popcount64(pkts_mask_out)); return 0; } @@ -961,7 +961,7 @@ rte_table_hash_lookup_key8_lru( bucket20, bucket21, pkts_mask_out, entries, f); *lookup_hit_mask = pkts_mask_out; - RTE_TABLE_HASH_KEY8_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out)); + RTE_TABLE_HASH_KEY8_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - rte_popcount64(pkts_mask_out)); return 0; } /* lookup LRU */ @@ -982,11 +982,11 @@ rte_table_hash_lookup_key8_ext( struct rte_bucket_4_8 *buckets[RTE_PORT_IN_BURST_SIZE_MAX]; uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX]; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_HASH_KEY8_STATS_PKTS_IN_ADD(f, n_pkts_in); /* Cannot run the pipeline with less than 5 packets */ - if (__builtin_popcountll(pkts_mask) < 5) { + if (rte_popcount64(pkts_mask) < 5) { for ( ; pkts_mask; ) { struct rte_bucket_4_8 *bucket; struct rte_mbuf *mbuf; @@ -1099,7 +1099,7 @@ rte_table_hash_lookup_key8_ext( uint64_t pkt_mask; uint32_t pkt_index; - pkt_index = __builtin_ctzll(buckets_mask); + pkt_index = rte_ctz64(buckets_mask); pkt_mask = 1LLU << pkt_index; buckets_mask &= ~pkt_mask; @@ -1111,7 +1111,7 @@ rte_table_hash_lookup_key8_ext( } *lookup_hit_mask = pkts_mask_out; - RTE_TABLE_HASH_KEY8_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out)); + RTE_TABLE_HASH_KEY8_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - rte_popcount64(pkts_mask_out)); return 0; } /* lookup EXT */ diff --git a/lib/table/rte_table_hash_lru.c b/lib/table/rte_table_hash_lru.c index f312d898c2e..a4e1a0599cb 100644 --- a/lib/table/rte_table_hash_lru.c +++ b/lib/table/rte_table_hash_lru.c @@ -404,7 +404,7 @@ static int rte_table_hash_lru_lookup_unoptimized( struct rte_table_hash *t = (struct rte_table_hash *) table; uint64_t pkts_mask_out = 0; - __rte_unused 
uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(t, n_pkts_in); for ( ; pkts_mask; ) { @@ -414,7 +414,7 @@ static int rte_table_hash_lru_lookup_unoptimized( uint64_t pkt_mask, sig; uint32_t pkt_index, bkt_index, i; - pkt_index = __builtin_ctzll(pkts_mask); + pkt_index = rte_ctz64(pkts_mask); pkt_mask = 1LLU << pkt_index; pkts_mask &= ~pkt_mask; @@ -447,7 +447,7 @@ static int rte_table_hash_lru_lookup_unoptimized( } *lookup_hit_mask = pkts_mask_out; - RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - __builtin_popcountll(pkts_mask_out)); + RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - rte_popcount64(pkts_mask_out)); return 0; } @@ -606,12 +606,12 @@ static int rte_table_hash_lru_lookup_unoptimized( struct rte_mbuf *mbuf00, *mbuf01; \ uint32_t key_offset = t->key_offset; \ \ - pkt00_index = __builtin_ctzll(pkts_mask); \ + pkt00_index = rte_ctz64(pkts_mask); \ pkt00_mask = 1LLU << pkt00_index; \ pkts_mask &= ~pkt00_mask; \ mbuf00 = pkts[pkt00_index]; \ \ - pkt01_index = __builtin_ctzll(pkts_mask); \ + pkt01_index = rte_ctz64(pkts_mask); \ pkt01_mask = 1LLU << pkt01_index; \ pkts_mask &= ~pkt01_mask; \ mbuf01 = pkts[pkt01_index]; \ @@ -627,12 +627,12 @@ static int rte_table_hash_lru_lookup_unoptimized( struct rte_mbuf *mbuf00, *mbuf01; \ uint32_t key_offset = t->key_offset; \ \ - pkt00_index = __builtin_ctzll(pkts_mask); \ + pkt00_index = rte_ctz64(pkts_mask); \ pkt00_mask = 1LLU << pkt00_index; \ pkts_mask &= ~pkt00_mask; \ mbuf00 = pkts[pkt00_index]; \ \ - pkt01_index = __builtin_ctzll(pkts_mask); \ + pkt01_index = rte_ctz64(pkts_mask); \ if (pkts_mask == 0) \ pkt01_index = pkt00_index; \ \ @@ -809,11 +809,11 @@ static int rte_table_hash_lru_lookup( uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0; int status = 0; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(t, n_pkts_in); /* Cannot run the pipeline with less than 7 packets */ - if (__builtin_popcountll(pkts_mask) < 7) + if (rte_popcount64(pkts_mask) < 7) return rte_table_hash_lru_lookup_unoptimized(table, pkts, pkts_mask, lookup_hit_mask, entries); @@ -924,7 +924,7 @@ static int rte_table_hash_lru_lookup( } *lookup_hit_mask = pkts_mask_out; - RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - __builtin_popcountll(pkts_mask_out)); + RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - rte_popcount64(pkts_mask_out)); return status; } diff --git a/lib/table/rte_table_lpm.c b/lib/table/rte_table_lpm.c index 9de9e8a20d7..c2ef0d9ba04 100644 --- a/lib/table/rte_table_lpm.c +++ b/lib/table/rte_table_lpm.c @@ -309,12 +309,12 @@ rte_table_lpm_lookup( uint64_t pkts_out_mask = 0; uint32_t i; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_LPM_STATS_PKTS_IN_ADD(lpm, n_pkts_in); pkts_out_mask = 0; for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX - - __builtin_clzll(pkts_mask)); i++) { + rte_clz64(pkts_mask)); i++) { uint64_t pkt_mask = 1LLU << i; if (pkt_mask & pkts_mask) { @@ -334,7 +334,7 @@ rte_table_lpm_lookup( } *lookup_hit_mask = pkts_out_mask; - RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(lpm, n_pkts_in - __builtin_popcountll(pkts_out_mask)); + RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(lpm, n_pkts_in - rte_popcount64(pkts_out_mask)); return 0; } diff --git a/lib/table/rte_table_lpm_ipv6.c 
b/lib/table/rte_table_lpm_ipv6.c index 8fde2c012f7..6f3e11a14f9 100644 --- a/lib/table/rte_table_lpm_ipv6.c +++ b/lib/table/rte_table_lpm_ipv6.c @@ -310,12 +310,12 @@ rte_table_lpm_ipv6_lookup( uint64_t pkts_out_mask = 0; uint32_t i; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_LPM_IPV6_STATS_PKTS_IN_ADD(lpm, n_pkts_in); pkts_out_mask = 0; for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX - - __builtin_clzll(pkts_mask)); i++) { + rte_clz64(pkts_mask)); i++) { uint64_t pkt_mask = 1LLU << i; if (pkt_mask & pkts_mask) { @@ -335,7 +335,7 @@ rte_table_lpm_ipv6_lookup( } *lookup_hit_mask = pkts_out_mask; - RTE_TABLE_LPM_IPV6_STATS_PKTS_LOOKUP_MISS(lpm, n_pkts_in - __builtin_popcountll(pkts_out_mask)); + RTE_TABLE_LPM_IPV6_STATS_PKTS_LOOKUP_MISS(lpm, n_pkts_in - rte_popcount64(pkts_out_mask)); return 0; } diff --git a/lib/table/rte_table_stub.c b/lib/table/rte_table_stub.c index 23d0de5c79b..cc215169953 100644 --- a/lib/table/rte_table_stub.c +++ b/lib/table/rte_table_stub.c @@ -56,7 +56,7 @@ rte_table_stub_lookup( __rte_unused void **entries) { __rte_unused struct rte_table_stub *stub = (struct rte_table_stub *) table; - __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask); + __rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask); RTE_TABLE_LPM_STATS_PKTS_IN_ADD(stub, n_pkts_in); *lookup_hit_mask = 0; diff --git a/lib/telemetry/telemetry.c b/lib/telemetry/telemetry.c index 7d0488a6d7d..aeb078c8a80 100644 --- a/lib/telemetry/telemetry.c +++ b/lib/telemetry/telemetry.c @@ -561,7 +561,7 @@ telemetry_legacy_init(void) return -1; } pthread_setaffinity_np(t_old, sizeof(*thread_cpuset), thread_cpuset); - set_thread_name(t_old, "telemetry-v1"); + set_thread_name(t_old, "dpdk-telemet-v1"); TMTY_LOG(DEBUG, "Legacy telemetry socket initialized ok\n"); pthread_detach(t_old); return 0; @@ -615,7 +615,7 @@ telemetry_v2_init(void) return -1; } pthread_setaffinity_np(t_new, sizeof(*thread_cpuset), thread_cpuset); - set_thread_name(t_new, "telemetry-v2"); + set_thread_name(t_new, "dpdk-telemet-v2"); pthread_detach(t_new); atexit(unlink_sockets); diff --git a/lib/vhost/fd_man.c b/lib/vhost/fd_man.c index 1876fada335..134414fb4b6 100644 --- a/lib/vhost/fd_man.c +++ b/lib/vhost/fd_man.c @@ -212,7 +212,7 @@ fdset_try_del(struct fdset *pfdset, int fd) * will wait until the flag is reset to zero(which indicates the callback is * finished), then it could free the context after fdset_del. 
*/ -void * +uint32_t fdset_event_dispatch(void *arg) { int i; @@ -227,7 +227,7 @@ fdset_event_dispatch(void *arg) int val; if (pfdset == NULL) - return NULL; + return 0; while (1) { @@ -303,7 +303,7 @@ fdset_event_dispatch(void *arg) fdset_shrink(pfdset); } - return NULL; + return 0; } static void diff --git a/lib/vhost/fd_man.h b/lib/vhost/fd_man.h index 6f4499bdfa5..6315904c8e2 100644 --- a/lib/vhost/fd_man.h +++ b/lib/vhost/fd_man.h @@ -46,7 +46,7 @@ int fdset_add(struct fdset *pfdset, int fd, void *fdset_del(struct fdset *pfdset, int fd); int fdset_try_del(struct fdset *pfdset, int fd); -void *fdset_event_dispatch(void *arg); +uint32_t fdset_event_dispatch(void *arg); int fdset_pipe_init(struct fdset *fdset); diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c index 424121cc00b..87ac0e51267 100644 --- a/lib/vhost/iotlb.c +++ b/lib/vhost/iotlb.c @@ -271,7 +271,7 @@ vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t ua new_node->uaddr = uaddr; new_node->uoffset = uoffset; new_node->size = size; - new_node->page_shift = __builtin_ctzll(page_size); + new_node->page_shift = rte_ctz64(page_size); new_node->perm = perm; vhost_user_iotlb_wr_lock_all(dev); diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c index fefe60fae6f..5882e44176a 100644 --- a/lib/vhost/socket.c +++ b/lib/vhost/socket.c @@ -13,8 +13,8 @@ #include #include #include -#include +#include #include #include "fd_man.h" @@ -431,7 +431,7 @@ struct vhost_user_reconnect_list { }; static struct vhost_user_reconnect_list reconn_list; -static pthread_t reconn_tid; +static rte_thread_t reconn_tid; static int vhost_user_connect_nonblock(char *path, int fd, struct sockaddr *un, size_t sz) @@ -455,7 +455,7 @@ vhost_user_connect_nonblock(char *path, int fd, struct sockaddr *un, size_t sz) return 0; } -static void * +static uint32_t vhost_user_client_reconnect(void *arg __rte_unused) { int ret; @@ -496,7 +496,7 @@ vhost_user_client_reconnect(void *arg __rte_unused) sleep(1); } - return NULL; + return 0; } static int @@ -511,8 +511,8 @@ vhost_user_reconnect_init(void) } TAILQ_INIT(&reconn_list.head); - ret = rte_ctrl_thread_create(&reconn_tid, "vhost_reconn", NULL, - vhost_user_client_reconnect, NULL); + ret = rte_thread_create_internal_control(&reconn_tid, "vhost-reco", + vhost_user_client_reconnect, NULL); if (ret != 0) { VHOST_LOG_CONFIG("thread", ERR, "failed to create reconnect thread\n"); if (pthread_mutex_destroy(&reconn_list.mutex)) @@ -1004,7 +1004,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags) if (!vsocket->is_vduse) { if ((flags & RTE_VHOST_USER_CLIENT) != 0) { vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT); - if (vsocket->reconnect && reconn_tid == 0) { + if (vsocket->reconnect && reconn_tid.opaque_id == 0) { if (vhost_user_reconnect_init() != 0) goto out_mutex; } @@ -1174,7 +1174,7 @@ int rte_vhost_driver_start(const char *path) { struct vhost_user_socket *vsocket; - static pthread_t fdset_tid; + static rte_thread_t fdset_tid; pthread_mutex_lock(&vhost_user.mutex); vsocket = find_vhost_user_socket(path); @@ -1186,7 +1186,7 @@ rte_vhost_driver_start(const char *path) if (vsocket->is_vduse) return vduse_device_create(path, vsocket->net_compliant_ol_flags); - if (fdset_tid == 0) { + if (fdset_tid.opaque_id == 0) { /** * create a pipe which will be waited by poll and notified to * rebuild the wait list of poll. 
@@ -1196,9 +1196,8 @@ rte_vhost_driver_start(const char *path) return -1; } - int ret = rte_ctrl_thread_create(&fdset_tid, - "vhost-events", NULL, fdset_event_dispatch, - &vhost_user.fdset); + int ret = rte_thread_create_internal_control(&fdset_tid, + "vhost-evt", fdset_event_dispatch, &vhost_user.fdset); if (ret != 0) { VHOST_LOG_CONFIG(path, ERR, "failed to create fdset handling thread\n"); fdset_pipe_uninit(&vhost_user.fdset); diff --git a/lib/vhost/vduse.c b/lib/vhost/vduse.c index 73ed4242323..080b58f7de1 100644 --- a/lib/vhost/vduse.c +++ b/lib/vhost/vduse.c @@ -16,6 +16,7 @@ #include #include +#include #include "fd_man.h" #include "iotlb.h" @@ -162,9 +163,12 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index) VHOST_LOG_CONFIG(dev->ifname, INFO, "VQ %u info:\n", index); VHOST_LOG_CONFIG(dev->ifname, INFO, "\tnum: %u\n", vq_info.num); - VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdesc_addr: %llx\n", vq_info.desc_addr); - VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdriver_addr: %llx\n", vq_info.driver_addr); - VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdevice_addr: %llx\n", vq_info.device_addr); + VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdesc_addr: %llx\n", + (unsigned long long)vq_info.desc_addr); + VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdriver_addr: %llx\n", + (unsigned long long)vq_info.driver_addr); + VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdevice_addr: %llx\n", + (unsigned long long)vq_info.device_addr); VHOST_LOG_CONFIG(dev->ifname, INFO, "\tavail_idx: %u\n", vq_info.split.avail_index); VHOST_LOG_CONFIG(dev->ifname, INFO, "\tready: %u\n", vq_info.ready); @@ -415,7 +419,7 @@ int vduse_device_create(const char *path, bool compliant_ol_flags) { int control_fd, dev_fd, vid, ret; - pthread_t fdset_tid; + rte_thread_t fdset_tid; uint32_t i, max_queue_pairs, total_queues; struct virtio_net *dev; struct virtio_net_config vnet_config = {{ 0 }}; @@ -435,7 +439,7 @@ vduse_device_create(const char *path, bool compliant_ol_flags) return -1; } - ret = rte_ctrl_thread_create(&fdset_tid, "vduse-events", NULL, + ret = rte_thread_create_internal_control(&fdset_tid, "vduse-evt", fdset_event_dispatch, &vduse.fdset); if (ret != 0) { VHOST_LOG_CONFIG(path, ERR, "failed to create vduse fdset handling thread\n"); diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c index eb6309b6819..7fde412ef39 100644 --- a/lib/vhost/vhost.c +++ b/lib/vhost/vhost.c @@ -6,6 +6,7 @@ #include #include #include +#include #ifdef RTE_LIBRTE_VHOST_NUMA #include #include @@ -48,6 +49,8 @@ static const struct vhost_vq_stats_name_off vhost_vq_stat_strings[] = { stats.guest_notifications_offloaded)}, {"guest_notifications_error", offsetof(struct vhost_virtqueue, stats.guest_notifications_error)}, + {"guest_notifications_suppressed", offsetof(struct vhost_virtqueue, + stats.guest_notifications_suppressed)}, {"iotlb_hits", offsetof(struct vhost_virtqueue, stats.iotlb_hits)}, {"iotlb_misses", offsetof(struct vhost_virtqueue, stats.iotlb_misses)}, {"inflight_submitted", offsetof(struct vhost_virtqueue, stats.inflight_submitted)}, @@ -1516,6 +1519,8 @@ rte_vhost_notify_guest(int vid, uint16_t queue_id) rte_rwlock_read_lock(&vq->access_lock); + __atomic_store_n(&vq->irq_pending, false, __ATOMIC_RELEASE); + if (dev->backend_ops->inject_irq(dev, vq)) { if (dev->flags & VIRTIO_DEV_STATS_ENABLED) __atomic_fetch_add(&vq->stats.guest_notifications_error, diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h index 9723429b1cb..5fc9035a1f1 100644 --- a/lib/vhost/vhost.h +++ b/lib/vhost/vhost.h @@ -156,6 +156,7 @@ struct virtqueue_stats { uint64_t 
iotlb_misses; uint64_t inflight_submitted; uint64_t inflight_completed; + uint64_t guest_notifications_suppressed; /* Counters below are atomic, and should be incremented as such. */ uint64_t guest_notifications; uint64_t guest_notifications_offloaded; @@ -346,6 +347,8 @@ struct vhost_virtqueue { struct vhost_vring_addr ring_addrs; struct virtqueue_stats stats; + + bool irq_pending; } __rte_cache_aligned; /* Virtio device status as per Virtio specification */ @@ -908,12 +911,24 @@ vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) static __rte_always_inline void vhost_vring_inject_irq(struct virtio_net *dev, struct vhost_virtqueue *vq) { - if (dev->notify_ops->guest_notify && - dev->notify_ops->guest_notify(dev->vid, vq->index)) { - if (dev->flags & VIRTIO_DEV_STATS_ENABLED) - __atomic_fetch_add(&vq->stats.guest_notifications_offloaded, - 1, __ATOMIC_RELAXED); - return; + bool expected = false; + + if (dev->notify_ops->guest_notify) { + if (__atomic_compare_exchange_n(&vq->irq_pending, &expected, true, 0, + __ATOMIC_RELEASE, __ATOMIC_RELAXED)) { + if (dev->notify_ops->guest_notify(dev->vid, vq->index)) { + if (dev->flags & VIRTIO_DEV_STATS_ENABLED) + __atomic_fetch_add(&vq->stats.guest_notifications_offloaded, + 1, __ATOMIC_RELAXED); + return; + } + + /* Offloading failed, fallback to direct IRQ injection */ + __atomic_store_n(&vq->irq_pending, false, __ATOMIC_RELEASE); + } else { + vq->stats.guest_notifications_suppressed++; + return; + } } if (dev->backend_ops->inject_irq(dev, vq)) { diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c index d7624d18c87..759a78e3e3b 100644 --- a/lib/vhost/virtio_net.c +++ b/lib/vhost/virtio_net.c @@ -77,7 +77,7 @@ vhost_queue_stats_update(struct virtio_net *dev, struct vhost_virtqueue *vq, uint32_t bin; /* count zeros, and offset into correct bin */ - bin = (sizeof(pkt_len) * 8) - __builtin_clz(pkt_len) - 5; + bin = (sizeof(pkt_len) * 8) - rte_clz32(pkt_len) - 5; stats->size_bins[bin]++; } else { if (pkt_len < 64) diff --git a/meson.build b/meson.build index 70b54f0c982..5e161f43e58 100644 --- a/meson.build +++ b/meson.build @@ -44,6 +44,7 @@ dpdk_drivers = [] dpdk_extra_ldflags = [] dpdk_libs_deprecated = [] dpdk_apps_disabled = [] +dpdk_apps_enabled = [] dpdk_libs_disabled = [] dpdk_libs_enabled = [] dpdk_drvs_disabled = [] @@ -94,10 +95,8 @@ install_subdir('examples', install_dir: get_option('datadir') + '/dpdk', exclude_files: ex_file_excludes) -# build kernel modules if enabled -if get_option('enable_kmods') - subdir('kernel') -endif +# build kernel modules +subdir('kernel') # check header includes if requested if get_option('check_includes') @@ -123,7 +122,7 @@ endif output_message = '\n=================\nApplications Enabled\n=================\n' output_message += '\napps:\n\t' output_count = 0 -foreach app:enabled_apps +foreach app:dpdk_apps_enabled output_message += app + ', ' output_count += 1 if output_count == 8 diff --git a/meson_options.txt b/meson_options.txt index 621e1ca9ba8..50a9d3669d3 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -11,19 +11,23 @@ option('disable_apps', type: 'string', value: '', description: option('disable_drivers', type: 'string', value: '', description: 'Comma-separated list of drivers to explicitly disable.') option('disable_libs', type: 'string', value: '', description: - 'Comma-separated list of libraries to explicitly disable. [NOTE: not all libs can be disabled]') + 'Comma-separated list of optional libraries to explicitly disable. 
[NOTE: mandatory libs cannot be disabled]') option('drivers_install_subdir', type: 'string', value: 'dpdk/pmds-', description: 'Subdirectory of libdir where to install PMDs. Defaults to using a versioned subdirectory.') option('enable_docs', type: 'boolean', value: false, description: 'build documentation') option('enable_apps', type: 'string', value: '', description: 'Comma-separated list of apps to build. If unspecified, build all apps.') +option('enable_deprecated_libs', type: 'string', value: '', description: + 'Comma-separated list of deprecated libraries to explicitly enable.') option('enable_drivers', type: 'string', value: '', description: 'Comma-separated list of drivers to build. If unspecified, build all drivers.') option('enable_driver_sdk', type: 'boolean', value: false, description: 'Install headers to build drivers.') -option('enable_kmods', type: 'boolean', value: false, description: - 'build kernel modules') +option('enable_kmods', type: 'boolean', value: true, description: + '[Deprecated - will be removed in future release] build kernel modules') +option('enable_libs', type: 'string', value: '', description: + 'Comma-separated list of optional libraries to explicitly enable. [NOTE: mandatory libs are always enabled]') option('examples', type: 'string', value: '', description: 'Comma-separated list of examples to build by default') option('ibverbs_link', type: 'combo', choices : ['static', 'shared', 'dlopen'], value: 'shared', description: @@ -46,6 +50,8 @@ option('mbuf_refcnt_atomic', type: 'boolean', value: true, description: 'Atomically access the mbuf refcnt.') option('platform', type: 'string', value: 'native', description: 'Platform to build, either "native", "generic" or a SoC. Please refer to the Linux build guide for more information.') +option('enable_stdatomic', type: 'boolean', value: false, description: + 'enable use of C11 stdatomic') option('enable_trace_fp', type: 'boolean', value: false, description: 'enable fast path trace points.') option('tests', type: 'boolean', value: true, description: diff --git a/usertools/meson.build b/usertools/meson.build index 0efa4a86d97..740b4832f36 100644 --- a/usertools/meson.build +++ b/usertools/meson.build @@ -1,6 +1,10 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation +if is_ms_compiler + subdir_done() +endif + install_data([ 'dpdk-devbind.py', 'dpdk-pmdinfo.py',