diff --git a/.github/workflows/ci-pipeline-arm64.yml b/.github/workflows/ci-pipeline-arm64.yml index 3f7304e912..b4fb1e271e 100644 --- a/.github/workflows/ci-pipeline-arm64.yml +++ b/.github/workflows/ci-pipeline-arm64.yml @@ -35,6 +35,26 @@ jobs: if: ${{ failure() }} run: find . -name config.log -exec cat {} \; + Build_static_u22: + if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }} + runs-on: [self-hosted, ARM64] + env: + OS: ubuntu_22.04 + CONF: "--disable-shared --without-openssl --without-pcap" + strategy: + fail-fast: false + matrix: + cc_ver: [10, 11, 12] + conf: ['', '--enable-lto'] + steps: + - uses: AutoModality/action-clean@v1.1.0 + - uses: actions/checkout@v3 + - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="gcc-${{matrix.cc_ver}}" -e CXX="g++-${{matrix.cc_ver}}" + -e CONF="${CONF} ${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/build_static.sh + - name: Failure log + if: ${{ failure() }} + run: find . -name config.log -exec cat {} \; + Build_OS: if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }} runs-on: [self-hosted, ARM64] @@ -52,7 +72,7 @@ jobs: if: ${{ failure() }} run: find . -name config.log -exec cat {} \; - Build_gcc: + Build_gcc_u22: if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }} runs-on: [self-hosted, ARM64] env: @@ -61,7 +81,7 @@ jobs: fail-fast: false matrix: cc_ver: [10, 11, 12] - conf: ['', '--enable-lto'] + conf: ['', '--enable-abi-compat'] steps: - uses: AutoModality/action-clean@v1.1.0 - uses: actions/checkout@v3 @@ -177,6 +197,19 @@ jobs: if: ${{ failure() }} run: find . -name "*.trs" | xargs grep -l '^.test-result. 
FAIL' | while read trs ; do echo FAILURE detected at $trs; cat ${trs%%.trs}.log ; done + Run_process_mode: + if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }} + runs-on: [self-hosted, ARM64] + steps: + - uses: AutoModality/action-clean@v1.1.0 + - uses: actions/checkout@v3 + - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}" + -e CONF="${CONF}" -e ODP_CONFIG_FILE=/odp/platform/linux-dpdk/test/process-mode.conf + -e ODPH_PROC_MODE=1 $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/check.sh + - name: Failure log + if: ${{ failure() }} + run: find . -name "*.trs" | xargs grep -l '^.test-result. FAIL' | while read trs ; do echo FAILURE detected at $trs; cat ${trs%%.trs}.log ; done + Run_dpdk-20_11: if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }} runs-on: [self-hosted, ARM64] diff --git a/.github/workflows/ci-pipeline.yml b/.github/workflows/ci-pipeline.yml index a8ad4914d8..425ee17b6b 100644 --- a/.github/workflows/ci-pipeline.yml +++ b/.github/workflows/ci-pipeline.yml @@ -95,7 +95,7 @@ jobs: steps: - uses: actions/checkout@v3 - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="gcc-${{matrix.cc_ver}}" -e CXX="g++-${{matrix.cc_ver}}" - -e CONF="${CONF} ${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-static /odp/scripts/ci/build_static_${ARCH}.sh + -e CONF="${CONF} ${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-static /odp/scripts/ci/build_static.sh - name: Failure log if: ${{ failure() }} run: find . 
-name config.log -exec cat {} \; @@ -327,7 +327,8 @@ jobs: steps: - uses: actions/checkout@v3 - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}" - -e CONF="${CONF}" -e ODPH_PROC_MODE=1 $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/check.sh + -e CONF="${CONF}" -e ODP_CONFIG_FILE=/odp/platform/linux-dpdk/test/process-mode.conf + -e ODPH_PROC_MODE=1 $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/check.sh - name: Failure log if: ${{ failure() }} run: find . -name "*.trs" | xargs grep -l '^.test-result. FAIL' | while read trs ; do echo FAILURE detected at $trs; cat ${trs%%.trs}.log ; done diff --git a/CHANGELOG b/CHANGELOG index eb80ee6cca..863b410020 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,58 @@ +== OpenDataPlane (1.39.0.0) + +=== Backward incompatible API changes +==== Classifier +* Deprecate `odp_cos_with_l2_priority()` function. Use `ODP_PMR_VLAN_PCP_0` +instead. +* Deprecate packet drop policy option (`odp_cls_cos_param_t.drop_policy`) and +related functions `odp_cos_drop()` and `odp_cos_drop_set()`. + +==== Shared Memory +* Change `odp_shm_info()` specification to disallow usage of invalid SHM +handles. + +=== Backward compatible API changes +==== Buffer +* Add multi variants of event conversion functions +(`odp_buffer_from_event_multi()` and `odp_buffer_to_event_multi()`). + +==== Classifier +* Add PFC priority level (`odp_bp_param_t.pfc_level`) to back pressure +parameters. +* Clarify PMR specification to state that in case of multiple PMRs matching +within a CoS, it is implementation specific which PMR is selected. + +==== Crypto +* Fix a stale reference to the renamed `hash_result_not_in_auth_range` +session parameter to use the correct name (`hash_result_in_auth_range`) +in the comment text of `hash_result_offset`. + +==== Packet +* Add support for additional L3 and L4 protocol types. 
+ +==== Packet IO +* Add `ODP_PKTIN_MAX_QUEUES` define for maximum number of packet input queues. +* Add new packet input queue size configuration option +`odp_pktin_queue_param_t.queue_size` and matching capabilities +`odp_pktio_capability_t.min_input_queue_size` and +`odp_pktio_capability_t.max_input_queue_size`. +* Add missing documentation to `odp_pktio_link_status_t`, +`odp_pktio_link_duplex_t`, and `odp_pktio_link_pause_t` enumerations. +* Add `ODP_PKTIO_LINK_PFC_ON` enumeration for PFC flow control mode. +* Add capabilities (`odp_pktio_capability_t.flow_control`) and configuration +parameters to control reception (`odp_pktio_config_t.flow_control.pause_rx`) and +transmission (`odp_pktio_config_t.flow_control.pause_tx`) of Ethernet pause +frames. + +==== Shared Memory +* Add `odp_shm_segment_info()` function for retrieving information about each +memory segment of an SHM block. +* Clarified `odp_shm_reserve()` operation with the default options (no flags). + +==== System +* Add `odp_system_meminfo()` function for retrieving information about ODP +memory usage. 
+ == OpenDataPlane (1.38.0.0) === Backward incompatible API changes diff --git a/configure.ac b/configure.ac index 3eaf6a8a28..9b3863cefa 100644 --- a/configure.ac +++ b/configure.ac @@ -3,7 +3,7 @@ AC_PREREQ([2.5]) # ODP API version ########################################################################## m4_define([odp_version_generation], [1]) -m4_define([odp_version_major], [38]) +m4_define([odp_version_major], [39]) m4_define([odp_version_minor], [0]) m4_define([odp_version_patch], [0]) diff --git a/example/classifier/odp_classifier.c b/example/classifier/odp_classifier.c index 99bebf4d43..552da53dc9 100644 --- a/example/classifier/odp_classifier.c +++ b/example/classifier/odp_classifier.c @@ -493,7 +493,7 @@ static odp_cos_t configure_default_cos(odp_pktio_t pktio, appl_args_t *args) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool_default; cls_param.queue = queue_default; - cls_param.drop_policy = ODP_COS_DROP_POOL; + cos_default = odp_cls_cos_create(cos_name, &cls_param); if (cos_default == ODP_COS_INVALID) { @@ -571,7 +571,7 @@ static void configure_cos(odp_cos_t default_cos, appl_args_t *args) if (appl_args_gbl->cos_pools) stats->pool = cls_param.pool; cls_param.queue = stats->queue; - cls_param.drop_policy = ODP_COS_DROP_POOL; + stats->cos = odp_cls_cos_create(cos_name, &cls_param); odp_atomic_init_u64(&stats->queue_pkt_count, 0); diff --git a/include/Makefile.am b/include/Makefile.am index 1c714bfd0e..49ccf552ef 100644 --- a/include/Makefile.am +++ b/include/Makefile.am @@ -31,6 +31,7 @@ odpapiinclude_HEADERS = \ odp/api/hints.h \ odp/api/init.h \ odp/api/ipsec.h \ + odp/api/ipsec_types.h \ odp/api/packet.h \ odp/api/packet_types.h \ odp/api/packet_flags.h \ @@ -93,6 +94,7 @@ odpapispecinclude_HEADERS = \ odp/api/spec/hints.h \ odp/api/spec/init.h \ odp/api/spec/ipsec.h \ + odp/api/spec/ipsec_types.h \ odp/api/spec/packet.h \ odp/api/spec/packet_types.h \ odp/api/spec/packet_flags.h \ @@ -156,6 +158,7 @@ odpapiabidefaultinclude_HEADERS = \ 
odp/api/abi-default/hash.h \ odp/api/abi-default/init.h \ odp/api/abi-default/ipsec.h \ + odp/api/abi-default/ipsec_types.h \ odp/api/abi-default/packet.h \ odp/api/abi-default/packet_types.h \ odp/api/abi-default/packet_flags.h \ @@ -213,6 +216,7 @@ odpapiabiarchinclude_HEADERS = \ odp/arch/arm32-linux/odp/api/abi/hash.h \ odp/arch/arm32-linux/odp/api/abi/init.h \ odp/arch/arm32-linux/odp/api/abi/ipsec.h \ + odp/arch/arm32-linux/odp/api/abi/ipsec_types.h \ odp/arch/arm32-linux/odp/api/abi/packet.h \ odp/arch/arm32-linux/odp/api/abi/packet_types.h \ odp/arch/arm32-linux/odp/api/abi/packet_flags.h \ @@ -266,6 +270,7 @@ odpapiabiarchinclude_HEADERS = \ odp/arch/arm64-linux/odp/api/abi/hash.h \ odp/arch/arm64-linux/odp/api/abi/init.h \ odp/arch/arm64-linux/odp/api/abi/ipsec.h \ + odp/arch/arm64-linux/odp/api/abi/ipsec_types.h \ odp/arch/arm64-linux/odp/api/abi/packet.h \ odp/arch/arm64-linux/odp/api/abi/packet_types.h \ odp/arch/arm64-linux/odp/api/abi/packet_flags.h \ @@ -319,6 +324,7 @@ odpapiabiarchinclude_HEADERS = \ odp/arch/default-linux/odp/api/abi/hash.h \ odp/arch/default-linux/odp/api/abi/init.h \ odp/arch/default-linux/odp/api/abi/ipsec.h \ + odp/arch/default-linux/odp/api/abi/ipsec_types.h \ odp/arch/default-linux/odp/api/abi/packet.h \ odp/arch/default-linux/odp/api/abi/packet_types.h \ odp/arch/default-linux/odp/api/abi/packet_flags.h \ @@ -372,6 +378,7 @@ odpapiabiarchinclude_HEADERS = \ odp/arch/power64-linux/odp/api/abi/hash.h \ odp/arch/power64-linux/odp/api/abi/init.h \ odp/arch/power64-linux/odp/api/abi/ipsec.h \ + odp/arch/power64-linux/odp/api/abi/ipsec_types.h \ odp/arch/power64-linux/odp/api/abi/packet.h \ odp/arch/power64-linux/odp/api/abi/packet_types.h \ odp/arch/power64-linux/odp/api/abi/packet_flags.h \ @@ -425,6 +432,7 @@ odpapiabiarchinclude_HEADERS = \ odp/arch/x86_32-linux/odp/api/abi/hash.h \ odp/arch/x86_32-linux/odp/api/abi/init.h \ odp/arch/x86_32-linux/odp/api/abi/ipsec.h \ + odp/arch/x86_32-linux/odp/api/abi/ipsec_types.h \ 
odp/arch/x86_32-linux/odp/api/abi/packet.h \ odp/arch/x86_32-linux/odp/api/abi/packet_types.h \ odp/arch/x86_32-linux/odp/api/abi/packet_flags.h \ @@ -478,6 +486,7 @@ odpapiabiarchinclude_HEADERS = \ odp/arch/x86_64-linux/odp/api/abi/hash.h \ odp/arch/x86_64-linux/odp/api/abi/init.h \ odp/arch/x86_64-linux/odp/api/abi/ipsec.h \ + odp/arch/x86_64-linux/odp/api/abi/ipsec_types.h \ odp/arch/x86_64-linux/odp/api/abi/packet.h \ odp/arch/x86_64-linux/odp/api/abi/packet_types.h \ odp/arch/x86_64-linux/odp/api/abi/packet_flags.h \ diff --git a/include/odp/api/abi-default/ipsec.h b/include/odp/api/abi-default/ipsec.h index 2c95fd4f5e..1cbc257a11 100644 --- a/include/odp/api/abi-default/ipsec.h +++ b/include/odp/api/abi-default/ipsec.h @@ -1,15 +1,10 @@ /* Copyright (c) 2016-2018, Linaro Limited + * Copyright (c) 2022, Nokia * All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ -/** - * @file - * - * ODP IPSEC API - platform specific types - */ - #ifndef ODP_ABI_IPSEC_H_ #define ODP_ABI_IPSEC_H_ @@ -17,22 +12,7 @@ extern "C" { #endif -#include - -/** @internal Dummy type for strong typing */ -typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_ipsec_sa_t; - -/** @ingroup odp_ipsec - * @{ - */ - -typedef _odp_abi_ipsec_sa_t *odp_ipsec_sa_t; - -#define ODP_IPSEC_SA_INVALID ((odp_ipsec_sa_t)0) - -/** - * @} - */ +/* Empty header required due to the packet inline functions */ #ifdef __cplusplus } diff --git a/include/odp/api/abi-default/ipsec_types.h b/include/odp/api/abi-default/ipsec_types.h new file mode 100644 index 0000000000..94fac6a203 --- /dev/null +++ b/include/odp/api/abi-default/ipsec_types.h @@ -0,0 +1,36 @@ +/* Copyright (c) 2016-2018, Linaro Limited + * Copyright (c) 2022, Nokia + * All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef ODP_ABI_IPSEC_TYPES_H_ +#define ODP_ABI_IPSEC_TYPES_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** @internal Dummy type for strong typing */ +typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_ipsec_sa_t; + +/** @ingroup odp_ipsec + * @{ + */ + +typedef _odp_abi_ipsec_sa_t *odp_ipsec_sa_t; + +#define ODP_IPSEC_SA_INVALID ((odp_ipsec_sa_t)0) + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/odp/api/abi-default/packet_io_types.h b/include/odp/api/abi-default/packet_io_types.h index ff77c80ad7..1c4785c461 100644 --- a/include/odp/api/abi-default/packet_io_types.h +++ b/include/odp/api/abi-default/packet_io_types.h @@ -53,6 +53,8 @@ typedef struct odp_pktout_queue_t { #define ODP_PKTIN_NO_WAIT 0 +#define ODP_PKTIN_MAX_QUEUES 64 + #define ODP_PKTOUT_MAX_QUEUES 64 #define ODP_PKTIO_STATS_EXTRA_NAME_LEN 64 diff --git a/include/odp/api/abi-default/packet_types.h b/include/odp/api/abi-default/packet_types.h index 9b886aa100..84f210b0e4 100644 --- a/include/odp/api/abi-default/packet_types.h +++ b/include/odp/api/abi-default/packet_types.h @@ -46,39 +46,6 @@ typedef _odp_abi_packet_tx_compl_t *odp_packet_tx_compl_t; #define ODP_PACKET_VECTOR_INVALID ((odp_packet_vector_t)0) #define ODP_PACKET_TX_COMPL_INVALID ((odp_packet_tx_compl_t)0) -typedef uint8_t odp_proto_l2_type_t; - -#define ODP_PROTO_L2_TYPE_NONE 0 -#define ODP_PROTO_L2_TYPE_ETH 1 - -typedef uint8_t odp_proto_l3_type_t; - -#define ODP_PROTO_L3_TYPE_NONE 0 -#define ODP_PROTO_L3_TYPE_ARP 1 -#define ODP_PROTO_L3_TYPE_RARP 2 -#define ODP_PROTO_L3_TYPE_MPLS 3 -#define ODP_PROTO_L3_TYPE_IPV4 4 -#define ODP_PROTO_L3_TYPE_IPV6 6 - -typedef uint8_t odp_proto_l4_type_t; - -/* Numbers from IANA Assigned Internet Protocol Numbers list */ -#define ODP_PROTO_L4_TYPE_NONE 0 -#define ODP_PROTO_L4_TYPE_ICMPV4 1 -#define ODP_PROTO_L4_TYPE_IGMP 2 -#define ODP_PROTO_L4_TYPE_IPV4 4 -#define 
ODP_PROTO_L4_TYPE_TCP 6 -#define ODP_PROTO_L4_TYPE_UDP 17 -#define ODP_PROTO_L4_TYPE_IPV6 41 -#define ODP_PROTO_L4_TYPE_GRE 47 -#define ODP_PROTO_L4_TYPE_ESP 50 -#define ODP_PROTO_L4_TYPE_AH 51 -#define ODP_PROTO_L4_TYPE_ICMPV6 58 -#define ODP_PROTO_L4_TYPE_NO_NEXT 59 -#define ODP_PROTO_L4_TYPE_IPCOMP 108 -#define ODP_PROTO_L4_TYPE_SCTP 132 -#define ODP_PROTO_L4_TYPE_ROHC 142 - /** Packet Color */ typedef enum { ODP_PACKET_GREEN = 0, diff --git a/include/odp/api/abi-default/shared_memory.h b/include/odp/api/abi-default/shared_memory.h index fdc93ea1dc..4668927cdb 100644 --- a/include/odp/api/abi-default/shared_memory.h +++ b/include/odp/api/abi-default/shared_memory.h @@ -23,6 +23,9 @@ typedef _odp_abi_shm_t *odp_shm_t; #define ODP_SHM_INVALID ((odp_shm_t)0) #define ODP_SHM_NAME_LEN 32 +#define ODP_SHM_IOVA_INVALID ((uint64_t)-1) +#define ODP_SHM_PA_INVALID ODP_SHM_IOVA_INVALID + /** * @} */ diff --git a/include/odp/api/ipsec_types.h b/include/odp/api/ipsec_types.h new file mode 100644 index 0000000000..9954f72158 --- /dev/null +++ b/include/odp/api/ipsec_types.h @@ -0,0 +1,28 @@ +/* Copyright (c) 2022, Nokia + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/** + * @file + * + * ODP IPsec + */ + +#ifndef ODP_API_IPSEC_TYPES_H_ +#define ODP_API_IPSEC_TYPES_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#include + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/odp/api/spec/buffer.h b/include/odp/api/spec/buffer.h index b739e549f4..cfb85df17d 100644 --- a/include/odp/api/spec/buffer.h +++ b/include/odp/api/spec/buffer.h @@ -51,6 +51,17 @@ extern "C" { */ odp_buffer_t odp_buffer_from_event(odp_event_t ev); +/** + * Convert multiple buffer events to buffer handles + * + * All events must be of type ODP_EVENT_BUFFER. 
+ * + * @param[out] buf Buffer handle array for output + * @param ev Array of event handles to convert + * @param num Number of buffers and events + */ +void odp_buffer_from_event_multi(odp_buffer_t buf[], const odp_event_t ev[], int num); + /** * Convert buffer handle to event * @@ -60,6 +71,15 @@ odp_buffer_t odp_buffer_from_event(odp_event_t ev); */ odp_event_t odp_buffer_to_event(odp_buffer_t buf); +/** + * Convert multiple buffer handles to events + * + * @param buf Array of buffer handles to convert + * @param[out] ev Event handle array for output + * @param num Number of buffers and events + */ +void odp_buffer_to_event_multi(const odp_buffer_t buf[], odp_event_t ev[], int num); + /** * Buffer start address * diff --git a/include/odp/api/spec/classification.h b/include/odp/api/spec/classification.h index f368260580..0c4294a6a1 100644 --- a/include/odp/api/spec/classification.h +++ b/include/odp/api/spec/classification.h @@ -405,6 +405,15 @@ typedef struct odp_bp_param_t { */ odp_threshold_t threshold; + /** + * PFC priority level + * + * When enabled (#ODP_PKTIO_LINK_PFC_ON), PFC frames are generated when the above + * threshold is exceeded. The generated frames request the receiver to temporarily halt + * transmission of traffic on this priority level (0 .. 7). + */ + uint8_t pfc_level; + } odp_bp_param_t; /** @@ -571,8 +580,12 @@ typedef struct odp_cls_capability_t { } odp_cls_capability_t; +#if ODP_DEPRECATED_API + /** * class of service packet drop policies + * + * @deprecated Drop policy will be removed from the API. */ typedef enum { ODP_COS_DROP_POOL, /**< Follow buffer pool drop policy */ @@ -580,6 +593,8 @@ typedef enum { } odp_cls_drop_t; +#endif + /** * Enumeration of actions for CoS.
*/ @@ -659,8 +674,10 @@ typedef struct odp_cls_cos_param { /** Pool associated with CoS */ odp_pool_t pool; +#if ODP_DEPRECATED_API /** Drop policy associated with CoS */ odp_cls_drop_t drop_policy; +#endif /** Random Early Detection configuration */ odp_red_param_t red; @@ -784,6 +801,8 @@ uint32_t odp_cls_cos_num_queue(odp_cos_t cos); */ uint32_t odp_cls_cos_queues(odp_cos_t cos, odp_queue_t queue[], uint32_t num); +#if ODP_DEPRECATED_API + /** * Assign packet drop policy for specific class-of-service * @@ -806,9 +825,12 @@ int odp_cos_drop_set(odp_cos_t cos, odp_cls_drop_t drop_policy); */ odp_cls_drop_t odp_cos_drop(odp_cos_t cos); +#endif + /** - * Request to override per-port class of service - * based on Layer-2 priority field if present. + * Request to override per-port class of service based on Layer-2 priority field if present. + * + * @deprecated Use #ODP_PMR_VLAN_PCP_0 instead. * * @param pktio_in Ingress port identifier. * @param num_qos Number of QoS levels, typically 8. @@ -819,10 +841,8 @@ odp_cls_drop_t odp_cos_drop(odp_cos_t cos); * @retval 0 on success * @retval <0 on failure */ -int odp_cos_with_l2_priority(odp_pktio_t pktio_in, - uint8_t num_qos, - uint8_t qos_table[], - odp_cos_t cos_table[]); +int ODP_DEPRECATE(odp_cos_with_l2_priority)(odp_pktio_t pktio_in, uint8_t num_qos, + uint8_t qos_table[], odp_cos_t cos_table[]); /** * Request to override per-port class of service based on Layer-3 priority field if present. @@ -902,39 +922,32 @@ void odp_cls_pmr_param_init(odp_pmr_param_t *param); void odp_cls_pmr_create_opt_init(odp_pmr_create_opt_t *opt); /** - * Create a packet matching rule + * Create Packet Matching Rule (PMR) * - * Create a packet match rule between source and destination class of service. - * This packet matching rule is applied on all packets arriving at the source - * class of service and packets satisfying this PMR are sent to the destination - * class of service. 
+ * Creates a PMR between source and destination Class of Service (CoS). A packet arriving to + * a CoS is matched against all the PMRs that define it as their source CoS. A PMR match moves + * the packet from the source to the destination CoS. If multiple PMRs of a CoS match with + * the packet, it is implementation specific which PMR is selected. * - * A composite PMR rule is created when the number of terms in the match rule - * is more than one. The composite rule is considered as matching only if - * the packet satisfies all the terms in Packet Match Rule. - * The underlying platform may not support all or any specific combination - * of value match rules, and the application should take care - * of inspecting the return value when installing such rules, and perform - * appropriate fallback action. + * A composite PMR is created when PMR parameters define more than one term. A composite PMR is + * considered to match only if a packet matches with all its terms. It is implementation specific + * which term combinations are supported as composite PMRs. When creating a composite PMR, + * application should check the return value and perform appropriate fallback actions if the create + * call returns failure. * - * Use odp_cls_pmr_param_init() to initialize parameters into their default - * values. + * Use odp_cls_pmr_param_init() to initialize parameters into their default values. + * + * PMRs created with this function are equivant to PMRs created through odp_cls_pmr_create_opt() + * with the same PMR terms and with all additional options set to their default values (e.g. + * CLS mark is set to zero in all matching packets). * * @param terms Array of odp_pmr_param_t entries, one entry per term - * desired. - * @param num_terms Number of terms in the match rule. + * @param num_terms Number of terms in the PMR. * @param src_cos source CoS handle * @param dst_cos destination CoS handle * - * @return Handle to the Packet Match Rule. 
+ * @return PMR handle on success * @retval ODP_PMR_INVALID on failure - * - * @note Matching PMR rule created through this function sets the CLS mark metadata - * of the packet to zero. - * - * @note Rules created through this function are equivalent to rules created through - * odp_cls_pmr_create_opt() with the same PMR terms and with the additional option - * fields set to their default values. */ odp_pmr_t odp_cls_pmr_create(const odp_pmr_param_t *terms, int num_terms, odp_cos_t src_cos, odp_cos_t dst_cos); diff --git a/include/odp/api/spec/crypto.h b/include/odp/api/spec/crypto.h index 4f2961f3cd..a79a05ad3f 100644 --- a/include/odp/api/spec/crypto.h +++ b/include/odp/api/spec/crypto.h @@ -812,7 +812,7 @@ typedef struct odp_crypto_packet_op_param_t { * In case of encode sessions the calculated hash will be stored in * this offset. * - * If the hash_result_not_in_auth_range session parameter is false, + * If the hash_result_in_auth_range session parameter is true, * the hash result location may overlap auth_range. In that case the * result location will be zeroed in decode sessions before hash * calculation. Zeroing is not done in encode sessions. diff --git a/include/odp/api/spec/ipsec.h b/include/odp/api/spec/ipsec.h index 4a42eb5a46..b091961cca 100644 --- a/include/odp/api/spec/ipsec.h +++ b/include/odp/api/spec/ipsec.h @@ -1,5 +1,5 @@ /* Copyright (c) 2016-2018, Linaro Limited - * Copyright (c) 2021, Nokia + * Copyright (c) 2021-2022, Nokia * All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause @@ -8,7 +8,7 @@ /** * @file * - * ODP IPSEC API + * ODP IPsec API */ #ifndef ODP_API_SPEC_IPSEC_H_ @@ -20,1084 +20,16 @@ extern "C" { #endif #include +#include +#include +#include #include -#include -#include -#include -#include /** @defgroup odp_ipsec ODP IPSEC * IPSEC protocol offload. 
* @{ */ -/** - * @typedef odp_ipsec_sa_t - * IPSEC Security Association (SA) - */ - - /** - * @def ODP_IPSEC_SA_INVALID - * Invalid IPSEC SA - */ - -/** - * IPSEC operation mode - */ -typedef enum odp_ipsec_op_mode_t { - /** Synchronous IPSEC operation - * - * Application uses synchronous IPSEC operations, - * which output all results on function return. - */ - ODP_IPSEC_OP_MODE_SYNC = 0, - - /** Asynchronous IPSEC operation - * - * Application uses asynchronous IPSEC operations, - * which return results via events. - */ - ODP_IPSEC_OP_MODE_ASYNC, - - /** Inline IPSEC operation - * - * Packet input/output is connected directly to IPSEC inbound/outbound - * processing. Application uses asynchronous or inline IPSEC - * operations. - * - * Inline processed inbound packets are delivered to the application - * in the same way as packets processed by odp_ipsec_in_enq(). - */ - ODP_IPSEC_OP_MODE_INLINE, - - /** IPSEC is disabled in inbound / outbound direction */ - ODP_IPSEC_OP_MODE_DISABLED - -} odp_ipsec_op_mode_t; - -/** - * IPSEC TEST SA operation - */ -typedef enum odp_ipsec_test_sa_operation_t { - /** Update next sequence number - * - * The seq_num parameter is an outbound SA specific parameter. - * Invoking the odp_ipsec_test_sa_update() API to update this - * field on an inbound SA will cause the API to return failure. - */ - ODP_IPSEC_TEST_SA_UPDATE_SEQ_NUM = 0, - - /** Update highest authenticated sequence number - * - * The antireplay_window_top parameter is inbound SA specific. - * Invoking the odp_ipsec_test_sa_update() API to update this - * field on an outbound SA will cause the API to return failure. 
- */ - ODP_IPSEC_TEST_SA_UPDATE_ANTIREPLAY_WINDOW_TOP - -} odp_ipsec_test_sa_operation_t; - -/** - * IPSEC TEST SA parameter - */ -typedef union odp_ipsec_test_sa_param_t { - /** Next sequence number - * - * @see ODP_IPSEC_TEST_SA_UPDATE_SEQ_NUM - */ - uint64_t seq_num; - - /** Highest authenticated sequence number - * - * @see ODP_IPSEC_TEST_SA_UPDATE_ANTIREPLAY_WINDOW_TOP - */ - uint64_t antireplay_window_top; - -} odp_ipsec_test_sa_param_t; - -/** - * Configuration options for IPSEC inbound processing - */ -typedef struct odp_ipsec_inbound_config_t { - /** Default destination queue for IPSEC events - * - * When inbound SA lookup fails in the asynchronous mode, - * resulting IPSEC events are enqueued into this queue. - */ - odp_queue_t default_queue; - - /** Constraints for SPI values used with inbound SA lookup. Minimal - * SPI range and unique values may improve performance. */ - struct { - /** Minimum SPI value for SA lookup. Default value is 0. */ - uint32_t min_spi; - - /** Maximum SPI value for SA lookup. Default value is - * UINT32_MAX. */ - uint32_t max_spi; - - /** Select if SPI values for SA lookup are unique or may contain - * the same SPI value multiple times. The default value is 0. - * - * 0: All SAs in SA lookup have unique SPI value - * 1: The same SPI value may be used for multiple SAs - */ - odp_bool_t spi_overlap; - - } lookup; - - /** Retain outer headers - * - * Select up to which protocol layer (at least) outer headers are - * retained in inbound inline processing. Default value is - * ODP_PROTO_LAYER_NONE. - * - * ODP_PROTO_LAYER_NONE: Application does not require any outer - * headers to be retained. - * - * ODP_PROTO_LAYER_L2: Retain headers up to layer 2. - * - * ODP_PROTO_LAYER_L3: Retain headers up to layer 3, otherwise the - * same as ODP_PROTO_LAYER_ALL. - * - * ODP_PROTO_LAYER_L4: Retain headers up to layer 4, otherwise the - * same as ODP_PROTO_LAYER_ALL. 
- * - * ODP_PROTO_LAYER_ALL: In tunnel mode, all headers before IPSEC are - * retained. In transport mode, all headers - * before IP (carrying IPSEC) are retained. - * - */ - odp_proto_layer_t retain_outer; - - /** Parse packet headers after IPSEC transformation - * - * Select header parsing level after inbound processing. Headers of the - * resulting packet must be checked (at least) up to this level. - * Parsing starts from IP (layer 3). Packet metadata from IP to this - * layer is set. In addition, offset (and pointer) to the next layer - * is set. Other layer/protocol specific metadata have undefined - * values. - * - * Each successfully transformed packet has a valid value for L3 offset - * regardless of the parse configuration. Default value is - * ODP_PROTO_LAYER_NONE. ODP_PROTO_LAYER_L2 is not a valid value. - */ - odp_proto_layer_t parse_level; - - /** Flags to control IPSEC payload data checks up to the selected parse - * level. Checksum checking status can be queried for each packet with - * odp_packet_l3_chksum_status() and odp_packet_l4_chksum_status(). - * Default value for all bits is 0 (skip all checksum checks). - */ - odp_proto_chksums_t chksums; - - /** Post-IPsec reassembly configuration - * - * This field provides global IPsec configuration parameters for - * fragment reassembly. The enable flag does not turn on reassembly - * but tells if reassembly may be enabled in SA parameters. - * - * The enable flag may be set only if retain_outer is - * ODP_PROTO_LAYER_NONE. - */ - odp_reass_config_t reassembly; - - /** Attempt reassembly after inbound IPsec processing in - * odp_ipsec_in_enq(). Default value is false. - */ - odp_bool_t reass_async; - - /** Attempt reassembly after inline inbound IPsec processing. - * Default value is false. 
- **/ - odp_bool_t reass_inline; - -} odp_ipsec_inbound_config_t; - -/** - * Configuration options for IPSEC outbound processing - */ -typedef struct odp_ipsec_outbound_config_t { - /** Flags to control L3/L4 checksum insertion as part of outbound - * packet processing. These flags control checksum insertion (for the - * payload packet) in the same way as the checksum flags in - * odp_pktout_config_opt_t control checksum insertion when sending - * packets out through a pktio interface. Also packet checksum override - * functions (e.g. odp_packet_l4_chksum_insert()) can be used in - * the same way. - */ - union { - /** Mapping for individual bits */ - struct { - /** Insert IPv4 header checksum on the payload packet - * before IPSEC transformation. Default value is 0. */ - uint32_t inner_ipv4 : 1; - - /** Insert UDP header checksum on the payload packet - * before IPSEC transformation. Default value is 0. */ - uint32_t inner_udp : 1; - - /** Insert TCP header checksum on the payload packet - * before IPSEC transformation. Default value is 0. */ - uint32_t inner_tcp : 1; - - /** Insert SCTP header checksum on the payload packet - * before IPSEC transformation. Default value is 0. */ - uint32_t inner_sctp : 1; - - } chksum; - - /** All bits of the bit field structure - * - * This field can be used to set/clear all flags, or bitwise - * operations over the entire structure. 
*/ - uint32_t all_chksum; - }; - -} odp_ipsec_outbound_config_t; - -/** - * IPSEC TEST capability - */ -typedef struct odp_ipsec_test_capability_t { - /** Parameters supported for sa_update */ - struct { - /** Next sequence number value - * - * @see ODP_IPSEC_TEST_SA_UPDATE_SEQ_NUM - */ - odp_bool_t seq_num; - - /** Highest authenticated sequence number - * - * @see ODP_IPSEC_TEST_SA_UPDATE_ANTIREPLAY_WINDOW_TOP - */ - odp_bool_t antireplay_window_top; - - } sa_operations; - -} odp_ipsec_test_capability_t; - -/** - * IPSEC capability - */ -typedef struct odp_ipsec_capability_t { - /** Maximum number of IPSEC SAs */ - uint32_t max_num_sa; - - /** Synchronous IPSEC operation mode (ODP_IPSEC_OP_MODE_SYNC) support */ - odp_support_t op_mode_sync; - - /** - * Asynchronous IPSEC operation mode (ODP_IPSEC_OP_MODE_ASYNC) support - */ - odp_support_t op_mode_async; - - /** - * Inline inbound IPSEC operation mode (ODP_IPSEC_OP_MODE_INLINE) - * support - */ - odp_support_t op_mode_inline_in; - - /** - * Inline outgoing IPSEC operation mode (ODP_IPSEC_OP_MODE_INLINE) - * support - */ - odp_support_t op_mode_inline_out; - - /** IP Authenticated Header (ODP_IPSEC_AH) support */ - odp_support_t proto_ah; - - /** Fragment after IPsec support */ - odp_support_t frag_after; - - /** Fragment before IPsec support */ - odp_support_t frag_before; - - /** - * Support of pipelined classification (ODP_IPSEC_PIPELINE_CLS) of - * resulting inbound packets - */ - odp_support_t pipeline_cls; - - /** - * Support of retaining outer headers (retain_outer) in inbound inline - * processed packets - */ - odp_support_t retain_header; - - /** - * Inner packet checksum check offload support in inbound direction. - */ - odp_proto_chksums_t chksums_in; - - /** Maximum number of different destination CoSes in classification - * pipelining. The same CoS may be used for many SAs. This is equal or - * less than 'max_cos' capability in classifier API. 
- */ - uint32_t max_cls_cos; - - /** - * Scheduled queue support - * - * 0: Scheduled queues are not supported either as IPsec SA destination - * queues or as IPsec default queue - * 1: Scheduled queues are supported as both IPsec SA destination queues - * and IPsec default queue - * @see odp_ipsec_sa_param_t - */ - odp_bool_t queue_type_sched; - - /** - * Plain queue support - * - * 0: Plain queues are not supported either as IPsec SA destination - * queues or as IPsec default queue - * 1: Plain queues are supported as both IPsec SA destination queues and - * IPsec default queue - * @see odp_ipsec_sa_param_t - */ - odp_bool_t queue_type_plain; - - /** Maximum number of different destination queues. The same queue may - * be used for many SAs. */ - uint32_t max_queues; - - /** Support for returning completion packets as vectors */ - odp_pktin_vector_capability_t vector; - - /** Maximum anti-replay window size. */ - uint32_t max_antireplay_ws; - - /** Supported cipher algorithms */ - odp_crypto_cipher_algos_t ciphers; - - /** Supported authentication algorithms */ - odp_crypto_auth_algos_t auths; - - /** Support of traffic manager (TM) after inline outbound IPSEC - * processing. On unsupported platforms, application is not allowed - * to use a TM enabled pktio (ODP_PKTOUT_MODE_TM) with outbound - * inline IPSEC. 
- * - * @see odp_pktio_open(), odp_pktio_param_t - */ - odp_support_t inline_ipsec_tm; - - /** IPSEC TEST capabilities - * - * @see odp_ipsec_test_sa_update() - */ - odp_ipsec_test_capability_t test; - - /** Post-IPsec reassembly capability */ - odp_reass_capability_t reassembly; - - /** Support of reassembly after inbound processing in odp_ipsec_in_enq() */ - odp_bool_t reass_async; - - /** Support of reassembly after inline inbound IPsec processing */ - odp_bool_t reass_inline; - -} odp_ipsec_capability_t; - -/** - * Cipher algorithm capabilities - */ -typedef struct odp_ipsec_cipher_capability_t { - /** Key length in bytes */ - uint32_t key_len; - -} odp_ipsec_cipher_capability_t; - -/** - * Authentication algorithm capabilities - */ -typedef struct odp_ipsec_auth_capability_t { - /** Key length in bytes */ - uint32_t key_len; - - /** ICV length in bytes */ - uint32_t icv_len; -} odp_ipsec_auth_capability_t; - -/** - * IPSEC configuration options - */ -typedef struct odp_ipsec_config_t { - /** Inbound IPSEC operation mode. Application selects which mode - * will be used for inbound IPSEC operations. - * - * @see odp_ipsec_in(), odp_ipsec_in_enq() - */ - odp_ipsec_op_mode_t inbound_mode; - - /** Outbound IPSEC operation mode. Application selects which mode - * will be used for outbound IPSEC operations. - * - * @see odp_ipsec_out(), odp_ipsec_out_enq(), odp_ipsec_out_inline() - */ - odp_ipsec_op_mode_t outbound_mode; - - /** Maximum number of IPSEC SAs that application will use - * simultaneously */ - uint32_t max_num_sa; - - /** IPSEC inbound processing configuration */ - odp_ipsec_inbound_config_t inbound; - - /** IPSEC outbound processing configuration */ - odp_ipsec_outbound_config_t outbound; - - /** Enable stats collection - * - * Default value is false (stats collection disabled). 
- * - * @see odp_ipsec_stats(), odp_ipsec_stats_multi() - */ - odp_bool_t stats_en; - - /** - * Packet vector configuration for async and inline operations - * - * This packet vector configuration affects packets delivered to - * the application through the default queue and the SA destination - * queues. It does not affect packets delivered through pktio - * input queues. - */ - odp_pktin_vector_config_t vector; - -} odp_ipsec_config_t; - -/** - * IPSEC SA direction - */ -typedef enum odp_ipsec_dir_t { - /** Inbound IPSEC SA */ - ODP_IPSEC_DIR_INBOUND = 0, - - /** Outbound IPSEC SA */ - ODP_IPSEC_DIR_OUTBOUND - -} odp_ipsec_dir_t; - -/** - * IPSEC protocol mode - */ -typedef enum odp_ipsec_mode_t { - /** IPSEC tunnel mode */ - ODP_IPSEC_MODE_TUNNEL = 0, - - /** IPSEC transport mode */ - ODP_IPSEC_MODE_TRANSPORT - -} odp_ipsec_mode_t; - -/** - * IPSEC protocol - */ -typedef enum odp_ipsec_protocol_t { - /** ESP protocol */ - ODP_IPSEC_ESP = 0, - - /** AH protocol */ - ODP_IPSEC_AH - -} odp_ipsec_protocol_t; - -/** - * IPSEC tunnel type - */ -typedef enum odp_ipsec_tunnel_type_t { - /** Outer header is IPv4 */ - ODP_IPSEC_TUNNEL_IPV4 = 0, - - /** Outer header is IPv6 */ - ODP_IPSEC_TUNNEL_IPV6 - -} odp_ipsec_tunnel_type_t; - -/** - * IPSEC crypto parameters - */ -typedef struct odp_ipsec_crypto_param_t { - /** Cipher algorithm - * - * Select cipher algorithm to be used. ODP_CIPHER_ALG_NULL indicates - * that ciphering is disabled. See 'ciphers' field of - * odp_ipsec_capability_t for supported cipher algorithms. Algorithm - * descriptions can be found from odp_cipher_alg_t documentation. Note - * that some algorithms restrict choice of the pairing authentication - * algorithm. When ciphering is enabled, cipher key and potential extra - * key material (cipher_key_extra) need to be set. The default value - * is ODP_CIPHER_ALG_NULL. 
- */ - odp_cipher_alg_t cipher_alg; - - /** Cipher key */ - odp_crypto_key_t cipher_key; - - /** Extra keying material for cipher algorithm - * - * Additional data used as salt or nonce if the algorithm requires it, - * other algorithms ignore this field. These algorithms require this - * field to be set: - * - ODP_CIPHER_ALG_AES_CTR: 4 bytes of nonce - * - ODP_CIPHER_ALG_AES_GCM: 4 bytes of salt - * - ODP_CIPHER_ALG_AES_CCM: 3 bytes of salt - * - ODP_CIPHER_ALG_CHACHA20_POLY1305: 4 bytes of salt - */ - odp_crypto_key_t cipher_key_extra; - - /** Authentication algorithm - * - * Select authentication algorithm to be used. ODP_AUTH_ALG_NULL - * indicates that authentication is disabled. See 'auths' field of - * odp_ipsec_capability_t for supported authentication algorithms. - * Algorithm descriptions can be found from odp_auth_alg_t - * documentation. Note that some algorithms restrict choice of the - * pairing cipher algorithm. When single algorithm provides both - * ciphering and authentication (i.e. Authenticated Encryption), - * authentication side key information ('auth_key' and - * 'auth_key_extra') is ignored, and cipher side values are - * used instead. These algorithms ignore authentication side key - * information: ODP_AUTH_ALG_AES_GCM, ODP_AUTH_ALG_AES_CCM and - * ODP_AUTH_ALG_CHACHA20_POLY1305. Otherwise, authentication side - * parameters must be set when authentication is enabled. The default - * value is ODP_AUTH_ALG_NULL. - */ - odp_auth_alg_t auth_alg; - - /** Authentication key */ - odp_crypto_key_t auth_key; - - /** Extra keying material for authentication algorithm - * - * Additional data used as salt or nonce if the algorithm requires it, - * other algorithms ignore this field. These algorithms require this - * field to be set: - * - ODP_AUTH_ALG_AES_GMAC: 4 bytes of salt - */ - odp_crypto_key_t auth_key_extra; - - /** - * Length of integrity check value (ICV) in bytes. - * - * Some algorithms support multiple ICV lengths when used with IPsec. 
- * This field can be used to select a non-default ICV length. - * - * Zero value indicates that the default ICV length shall be used. - * The default length depends on the selected algorithm as follows: - * - * Algorithm Default length Other lengths - * ---------------------------------------------------------------- - * ODP_AUTH_ALG_NULL 0 - * ODP_AUTH_ALG_MD5_HMAC 12 - * ODP_AUTH_ALG_SHA1_HMAC 12 - * ODP_AUTH_ALG_SHA256_HMAC 16 - * ODP_AUTH_ALG_SHA384_HMAC 24 - * ODP_AUTH_ALG_SHA512_HMAC 32 - * ODP_AUTH_ALG_AES_GCM 16 8, 12 - * ODP_AUTH_ALG_AES_GMAC 16 - * ODP_AUTH_ALG_AES_CCM 16 8, 12 - * ODP_AUTH_ALG_AES_CMAC 12 - * ODP_AUTH_ALG_AES_XCBC_MAC 12 - * ODP_AUTH_ALG_CHACHA20_POLY1305 16 - * - * The requested ICV length must be supported for the selected - * algorithm as indicated by odp_ipsec_auth_capability(). - * - * The default value is 0. - */ - uint32_t icv_len; - -} odp_ipsec_crypto_param_t; - -/** IPv4 header parameters */ -typedef struct odp_ipsec_ipv4_param_t { - /** IPv4 source address (NETWORK ENDIAN) */ - void *src_addr; - - /** IPv4 destination address (NETWORK ENDIAN) */ - void *dst_addr; - - /** IPv4 Differentiated Services Code Point. The default value is 0. */ - uint8_t dscp; - - /** IPv4 Don't Fragment bit. The default value is 0. */ - uint8_t df; - - /** IPv4 Time To Live. The default value is 255. */ - uint8_t ttl; - -} odp_ipsec_ipv4_param_t; - -/** IPv6 header parameters */ -typedef struct odp_ipsec_ipv6_param_t { - /** IPv6 source address (NETWORK ENDIAN) */ - void *src_addr; - - /** IPv6 destination address (NETWORK ENDIAN) */ - void *dst_addr; - - /** IPv6 flow label. The default value is 0. */ - uint32_t flabel; - - /** IPv6 Differentiated Services Code Point. The default value is 0. */ - uint8_t dscp; - - /** IPv6 hop limit. The default value is 255. */ - uint8_t hlimit; - -} odp_ipsec_ipv6_param_t; - -/** - * IPSEC tunnel parameters - * - * These parameters are used to build outbound tunnel headers. 
All values are - * passed in CPU native byte / bit order if not specified otherwise. - * IP addresses must be in NETWORK byte order as those are passed in with - * pointers and copied byte-by-byte from memory to the packet. - */ -typedef struct odp_ipsec_tunnel_param_t { - /** Tunnel type: IPv4 or IPv6. The default is IPv4. */ - odp_ipsec_tunnel_type_t type; - - /** Tunnel type specific parameters */ - struct { - /** IPv4 header parameters */ - odp_ipsec_ipv4_param_t ipv4; - - /** IPv6 header parameters */ - odp_ipsec_ipv6_param_t ipv6; - }; -} odp_ipsec_tunnel_param_t; - -/** - * IPSEC SA option flags - */ -typedef struct odp_ipsec_sa_opt_t { - /** Extended Sequence Numbers (ESN) - * - * * 1: Use extended (64 bit) sequence numbers - * * 0: Use normal sequence numbers (the default value) - */ - uint32_t esn : 1; - - /** UDP encapsulation - * - * * 1: Do UDP encapsulation/decapsulation so that IPSEC packets can - * traverse through NAT boxes. - * * 0: No UDP encapsulation (the default value) - */ - uint32_t udp_encap : 1; - - /** Copy DSCP bits - * - * * 1: Copy IPv4 or IPv6 DSCP bits from inner IP header to - * the outer IP header in encapsulation, and vice versa in - * decapsulation. - * * 0: Use values from odp_ipsec_tunnel_param_t in encapsulation and - * do not change DSCP field in decapsulation (the default value). - */ - uint32_t copy_dscp : 1; - - /** Copy IPv6 Flow Label - * - * * 1: Copy IPv6 flow label from inner IPv6 header to the - * outer IPv6 header. - * * 0: Use value from odp_ipsec_tunnel_param_t (the default value) - */ - uint32_t copy_flabel : 1; - - /** Copy IPv4 Don't Fragment bit - * - * * 1: Copy the DF bit from the inner IPv4 header to the outer - * IPv4 header. 
- * * 0: Use value from odp_ipsec_tunnel_param_t (the default value) - */ - uint32_t copy_df : 1; - - /** Decrement inner packet Time To Live (TTL) field - * - * * 1: In tunnel mode, decrement inner packet IPv4 TTL or - * IPv6 Hop Limit after tunnel decapsulation, or before tunnel - * encapsulation. - * * 0: Inner packet is not modified (the default value) - */ - uint32_t dec_ttl : 1; - -} odp_ipsec_sa_opt_t; - -/** - * IPSEC SA lifetime limits - * - * These limits are used for setting up SA lifetime. IPSEC operations check - * against the limits and output a status code (e.g. soft_exp_bytes) when - * a limit is crossed. It's implementation defined how many times soft - * lifetime expiration is reported: only once, first N or all packets following - * the limit crossing. Any number of limits may be used simultaneously. - * Use zero when there is no limit. - * - * The default value is zero (i.e. no limit) for all the limits. - */ -typedef struct odp_ipsec_lifetime_t { - /** Soft expiry limits for the session */ - struct { - /** Limit in bytes */ - uint64_t bytes; - - /** Limit in packet */ - uint64_t packets; - } soft_limit; - - /** Hard expiry limits for the session */ - struct { - /** Limit in bytes */ - uint64_t bytes; - - /** Limit in packet */ - uint64_t packets; - } hard_limit; -} odp_ipsec_lifetime_t; - -/** - * Fragmentation mode - * - * These options control outbound IP packet fragmentation offload. When offload - * is enabled, IPSEC operation will determine if fragmentation is needed and - * does it according to the mode. - */ -typedef enum odp_ipsec_frag_mode_t { - /** Do not fragment IP packets */ - ODP_IPSEC_FRAG_DISABLED = 0, - - /** Fragment IP packet before IPSEC operation */ - ODP_IPSEC_FRAG_BEFORE, - - /** Fragment IP packet after IPSEC operation */ - ODP_IPSEC_FRAG_AFTER, - - /** Only check if IP fragmentation is needed, - * do not fragment packets. 
*/ - ODP_IPSEC_FRAG_CHECK -} odp_ipsec_frag_mode_t; - -/** - * Packet lookup mode - * - * Lookup mode controls how an SA participates in SA lookup offload. - * Inbound operations perform SA lookup if application does not provide a SA as - * a parameter. In inline mode, a lookup miss directs the packet back to normal - * packet input interface processing. SA lookup failure status - * (status.error.sa_lookup) is reported through odp_ipsec_packet_result_t. - */ -typedef enum odp_ipsec_lookup_mode_t { - /** Inbound SA lookup is disabled for the SA. */ - ODP_IPSEC_LOOKUP_DISABLED = 0, - - /** Inbound SA lookup is enabled. Lookup matches only SPI value. */ - ODP_IPSEC_LOOKUP_SPI, - - /** Inbound SA lookup is enabled. Lookup matches both SPI value and - * destination IP address. Functionality is otherwise identical to - * ODP_IPSEC_LOOKUP_SPI. */ - ODP_IPSEC_LOOKUP_DSTADDR_SPI - -} odp_ipsec_lookup_mode_t; - -/** - * IPSEC pipeline configuration - */ -typedef enum odp_ipsec_pipeline_t { - /** Do not pipeline. Send all resulting events to the application. */ - ODP_IPSEC_PIPELINE_NONE = 0, - - /** Send resulting packets to the classifier - * - * IPSEC capability 'pipeline_cls' determines if pipelined - * classification is supported. */ - ODP_IPSEC_PIPELINE_CLS - -} odp_ipsec_pipeline_t; - -/** - * IPSEC header type - */ -typedef enum odp_ipsec_ip_version_t { - /** Header is IPv4 */ - ODP_IPSEC_IPV4 = 4, - - /** Header is IPv6 */ - ODP_IPSEC_IPV6 = 6 - -} odp_ipsec_ip_version_t; - -/** - * IPSEC Security Association (SA) parameters - */ -typedef struct odp_ipsec_sa_param_t { - /** IPSEC SA direction: inbound or outbound */ - odp_ipsec_dir_t dir; - - /** IPSEC protocol: ESP or AH. The default value is ODP_IPSEC_ESP. 
*/ - odp_ipsec_protocol_t proto; - - /** IPSEC protocol mode: transport or tunnel */ - odp_ipsec_mode_t mode; - - /** Parameters for crypto and authentication algorithms */ - odp_ipsec_crypto_param_t crypto; - - /** Various SA option flags */ - odp_ipsec_sa_opt_t opt; - - /** SA lifetime parameters */ - odp_ipsec_lifetime_t lifetime; - - /** SPI value */ - uint32_t spi; - - /** Destination queue for IPSEC events - * - * Operations in asynchronous or inline mode enqueue resulting events - * into this queue. The default queue ('default_queue') is used when - * SA is not known. - */ - odp_queue_t dest_queue; - - /** User defined SA context pointer - * - * User defined context pointer associated with the SA. - * The implementation may prefetch the context data. Default value - * of the pointer is NULL. - */ - void *context; - - /** Context data length - * - * User defined context data length in bytes for prefetching. - * The implementation may use this value as a hint for the number of - * context data bytes to prefetch. Default value is zero (no hint). - */ - uint32_t context_len; - - /** IPSEC SA direction dependent parameters */ - struct { - /** Inbound specific parameters */ - struct { - /** SA lookup mode - * The default value is ODP_IPSEC_LOOKUP_DISABLED. - */ - odp_ipsec_lookup_mode_t lookup_mode; - - /** Additional SA lookup parameters. Values are - * considered only in ODP_IPSEC_LOOKUP_DSTADDR_SPI - * lookup mode. */ - struct { - /** Select IP version */ - odp_ipsec_ip_version_t ip_version; - - /** IP destination address (NETWORK ENDIAN) to - * be matched in addition to SPI value. */ - void *dst_addr; - - } lookup_param; - - /** Minimum anti-replay window size. Use 0 to disable - * anti-replay service. The default value is 0. - */ - uint32_t antireplay_ws; - - /** Select pipelined destination for resulting events - * - * Asynchronous and inline modes generate events. - * Select where those events are sent. 
Inbound SAs may - * choose to use pipelined classification. The default - * value is ODP_IPSEC_PIPELINE_NONE. - */ - odp_ipsec_pipeline_t pipeline; - - /** Classifier destination CoS for resulting packets - * - * Successfully decapsulated packets are sent to - * classification through this CoS. Other resulting - * events are sent to 'dest_queue'. This field is - * considered only when 'pipeline' is - * ODP_IPSEC_PIPELINE_CLS. The CoS must not be shared - * between any pktio interface default CoS. The maximum - * number of different CoS supported is defined by - * IPSEC capability max_cls_cos. - */ - odp_cos_t dest_cos; - - /** Enable reassembly of IPsec tunneled fragments - * - * Attempt reassembly of fragments after IPsec tunnel - * decapsulation. - * - * Reassembly is attempted for inline or asynchronously - * processed packets, not for packets processed using - * the synchronous API function. - * - * Fragments received through different SAs will not be - * reassembled into the same packet. - * - * IPsec statistics reflect IPsec processing before - * reassembly and thus count all individual fragments. - * - * Reassembly may be enabled for an SA only if - * reassembly was enabled in the global IPsec - * configuration. - * - * Default value is false. - * - * @see odp_ipsec_config() - * - */ - odp_bool_t reassembly_en; - - } inbound; - - /** Outbound specific parameters */ - struct { - /** Parameters for tunnel mode */ - odp_ipsec_tunnel_param_t tunnel; - - /** Fragmentation mode - * The default value is ODP_IPSEC_FRAG_DISABLED. - */ - odp_ipsec_frag_mode_t frag_mode; - - /** MTU for outbound IP fragmentation offload - * - * This is the maximum length of IP packets that - * outbound IPSEC operations may produce. The value may - * be updated later with odp_ipsec_sa_mtu_update(). 
- */ - uint32_t mtu; - - } outbound; - }; - -} odp_ipsec_sa_param_t; - -/** - * IPSEC stats content - */ -typedef struct odp_ipsec_stats_t { - /** Number of packets processed successfully */ - uint64_t success; - - /** Number of packets with protocol errors */ - uint64_t proto_err; - - /** Number of packets with authentication errors */ - uint64_t auth_err; - - /** Number of packets with antireplay check failures */ - uint64_t antireplay_err; - - /** Number of packets with algorithm errors */ - uint64_t alg_err; - - /** Number of packes with MTU errors */ - uint64_t mtu_err; - - /** Number of packets with hard lifetime(bytes) expired */ - uint64_t hard_exp_bytes_err; - - /** Number of packets with hard lifetime(packets) expired */ - uint64_t hard_exp_pkts_err; - - /** Total bytes of packet data processed by IPsec SA in success cases - * - * The range of packet bytes included in the success_bytes count is - * implementation defined but includes at least the bytes input for - * encryption or bytes output after decryption in ESP or the bytes - * authenticated in AH. - */ - uint64_t success_bytes; -} odp_ipsec_stats_t; - -/** - * IPSEC SA information - */ -typedef struct odp_ipsec_sa_info_t { - /** IPsec SA parameters - * - * This is not necessarily an exact copy of the actual parameter - * structure used in SA creation. The fields that were relevant - * for the SA in the creation phase will have the same values, - * but other fields, such as tunnel parameters for a transport - * mode SA, will have undefined values. - */ - odp_ipsec_sa_param_t param; - - /** IPSEC SA direction dependent parameters */ - union { - /** Inbound specific parameters */ - struct { - /** Additional SA lookup parameters. */ - struct { - /** IP destination address (NETWORK ENDIAN) to - * be matched in addition to SPI value. */ - uint8_t dst_addr[ODP_IPV6_ADDR_SIZE]; - } lookup_param; - - /** Antireplay window size - * - * Antireplay window size configured for the SA. 
- * This value can be different from what application - * had requested. - */ - uint32_t antireplay_ws; - - /** Antireplay window top - * - * Sequence number representing a recent top of the - * anti-replay window. There may be a delay before the - * SA state is reflected in the value. The value will be - * zero if no packets have been processed or if the - * anti-replay service is not enabled. - */ - uint64_t antireplay_window_top; - } inbound; - - /** Outbound specific parameters */ - struct { - /** Sequence number - * - * Sequence number used for a recently processed packet. - * There may be a delay before the SA state is reflected - * in the value. When no packets have been processed, - * the value will be zero. - */ - uint64_t seq_num; - - /** Tunnel IP address */ - union { - /** IPv4 */ - struct { - /** IPv4 source address */ - uint8_t src_addr[ODP_IPV4_ADDR_SIZE]; - /** IPv4 destination address */ - uint8_t dst_addr[ODP_IPV4_ADDR_SIZE]; - } ipv4; - - /** IPv6 */ - struct { - /** IPv6 source address */ - uint8_t src_addr[ODP_IPV6_ADDR_SIZE]; - /** IPv6 destination address */ - uint8_t dst_addr[ODP_IPV6_ADDR_SIZE]; - } ipv6; - } tunnel; - } outbound; - }; -} odp_ipsec_sa_info_t; - /** * Query IPSEC capabilities * @@ -1253,394 +185,6 @@ int odp_ipsec_sa_destroy(odp_ipsec_sa_t sa); */ uint64_t odp_ipsec_sa_to_u64(odp_ipsec_sa_t sa); -/** IPSEC operation status has no errors */ -#define ODP_IPSEC_OK 0 - -/** IPSEC errors */ -typedef struct odp_ipsec_error_t { - /** IPSEC errors */ - union { - /** Error bits */ - struct { - /** Protocol error. Not a valid ESP or AH packet, - * packet data length error, etc. 
*/ - uint32_t proto : 1; - - /** SA lookup failed */ - uint32_t sa_lookup : 1; - - /** Authentication failed */ - uint32_t auth : 1; - - /** Anti-replay check failed */ - uint32_t antireplay : 1; - - /** Other algorithm error */ - uint32_t alg : 1; - - /** Packet does not fit into the given MTU size */ - uint32_t mtu : 1; - - /** Hard lifetime expired: bytes */ - uint32_t hard_exp_bytes : 1; - - /** Hard lifetime expired: packets */ - uint32_t hard_exp_packets : 1; - }; - - /** All error bits - * - * This field can be used to set, clear or compare - * multiple bits. For example, 'status.error.all != 0' - * checks if there are any errors. - */ - uint32_t all; - }; - -} odp_ipsec_error_t; - -/** IPSEC warnings */ -typedef struct odp_ipsec_warn_t { - /** IPSEC warnings */ - union { - /** Warning bits */ - struct { - /** Soft lifetime expired: bytes */ - uint32_t soft_exp_bytes : 1; - - /** Soft lifetime expired: packets */ - uint32_t soft_exp_packets : 1; - }; - - /** All warning bits - * - * This field can be used to set/clear all bits, or to perform - * bitwise operations over those. */ - uint32_t all; - }; - -} odp_ipsec_warn_t; - -/** IPSEC operation status */ -typedef struct odp_ipsec_op_status_t { - /** IPSEC status bits */ - union { - /** IPSEC errors and warnings */ - struct { - /** IPSEC errors */ - odp_ipsec_error_t error; - - /** IPSEC warnings */ - odp_ipsec_warn_t warn; - }; - - /** All status bits. Combines all error and warning bits. - * For example, 'status.all != ODP_IPSEC_OK' checks if there - * are any errors or warnings. */ - uint64_t all; - - }; - -} odp_ipsec_op_status_t; - -/** IPSEC operation flags */ -typedef struct odp_ipsec_op_flag_t { - /** IPSEC operations flags */ - union { - /** Operation flags */ - struct { - /** Packet was processed in inline mode */ - uint32_t inline_mode : 1; - - }; - - /** All flag bits - * - * This field can be used to set/clear all flags, or to perform - * bitwise operations over those. 
*/ - uint32_t all; - }; - -} odp_ipsec_op_flag_t; - -/** - * IPSEC outbound operation options - * - * These may be used to override some SA level options - */ -typedef struct odp_ipsec_out_opt_t { - /** Union of all flag bits */ - union { - /** Option flags. Set flag for those options that are - * used, all other options are ignored. */ - struct { - /** Use fragmentation mode option */ - uint32_t frag_mode: 1; - - /** Use TFC padding length option */ - uint32_t tfc_pad: 1; - - /** Tunnel mode TFC dummy packet. This can be used only - * in tunnel mode. When the flag is set, packet length - * and content is ignored and instead a TFC dummy - * packet is created during IPSEC operation. The dummy - * packet length is defined by 'tfc_pad_len' option. - * If the SA is configured to copy IP header fields - * from inner IP packet, those fields must be passed - * with IP parameters option. */ - uint32_t tfc_dummy: 1; - - /** Use IP parameters option */ - uint32_t ip_param: 1; - - } flag; - - /** All flag bits - * - * This field can be used to set/clear all flags, or to perform - * bitwise operations over those. */ - uint32_t all_flags; - }; - - /** Fragmentation mode */ - odp_ipsec_frag_mode_t frag_mode; - - /** TFC padding length - * - * Number of TFC padding bytes added to the packet during IPSEC - * processing. Resulting packet should not exceed the maximum packet - * length of the pool, otherwise IPSEC operation may fail. - * Implementation guarantees that the padding does not contain any - * confidential information. */ - uint32_t tfc_pad_len; - - /** Union of IP parameters */ - union { - /** Override IPv4 parameters in outer header creation. - * IP addresses are ignored. */ - odp_ipsec_ipv4_param_t ipv4; - - /** Override IPv6 parameters in outer header creation. - * IP addresses are ignored. 
*/ - odp_ipsec_ipv6_param_t ipv6; - }; - -} odp_ipsec_out_opt_t; - -/** - * IPSEC outbound operation parameters - */ -typedef struct odp_ipsec_out_param_t { - /** Number of SAs - * - * Outbound IPSEC operation needs SA from application. Use either - * single SA for all packets, or a SA per packet. - * - * Valid values are: - * - 1: Single SA for all packets - * - N: A SA per packet. N must match the number of packets. - */ - int num_sa; - - /** Number of outbound operation options - * - * Valid values are: - * - 0: No options - * - 1: Single option for all packets - * - N: An option per packet. N must match the number of packets. - */ - int num_opt; - - /** Pointer to an array of IPSEC SAs */ - const odp_ipsec_sa_t *sa; - - /** Pointer to an array of outbound operation options - * - * May be NULL when num_opt is zero. - */ - const odp_ipsec_out_opt_t *opt; - -} odp_ipsec_out_param_t; - -/** - * IPSEC inbound operation parameters - */ -typedef struct odp_ipsec_in_param_t { - /** Number of SAs - * - * Inbound IPSEC operation processes a packet using the SA provided by - * the application. If the application does not provide an SA, the - * operation searches for the SA by matching the input packet with all - * inbound SAs according to the lookup mode (odp_ipsec_lookup_mode_t) - * configured in each SA. When passing SAs, use either single SA for - * all packets, or a SA per packet. - * - * Valid values are: - * - 0: No SAs. SA lookup is done for all packets. - * - 1: Single SA for all packets - * - N: A SA per packet. N must match the number of packets. - */ - int num_sa; - - /** Pointer to an array of IPSEC SAs - * - * May be NULL when num_sa is zero. 
- */ - const odp_ipsec_sa_t *sa; - -} odp_ipsec_in_param_t; - -/** - * Outbound inline IPSEC operation parameters - */ -typedef struct odp_ipsec_out_inline_param_t { - /** Packet output interface for inline outbound operation without TM - * - * Outbound inline IPSEC operation uses this packet IO interface to - * output the packet after a successful IPSEC transformation. The pktio - * must have been configured to operate in inline IPSEC mode. - * - * The pktio must not have been configured with ODP_PKTOUT_MODE_TM. - * For IPSEC inline output to TM enabled interfaces set this field - * to ODP_PKTIO_INVALID and specify the TM queue to be used through - * the tm_queue parameter. Inline IPSEC output through TM can be - * done only if the platform has inline_ipsec_tm capability. - */ - odp_pktio_t pktio; - - /** TM queue for inline outbound operation - * - * TM queue to be used for inline IPSEC output when pktio field - * is ODP_PKTIO_INVALID, indicating use of TM. Otherwise ignored. - * - * @see odp_ipsec_capability() - */ - odp_tm_queue_t tm_queue; - - /** Outer headers for inline output operation - * - * Outbound inline IPSEC operation uses this information to prepend - * outer headers to the IPSEC packet before sending it out. - */ - struct { - /** Points to first byte of outer headers to be copied in - * front of the outgoing IPSEC packet. Implementation copies - * the headers during odp_ipsec_out_inline() call. - * - * Null value indicates that the outer headers are in the - * packet data, starting at L2 offset and ending at the byte - * before L3 offset. In this case, value of 'len' field must - * be greater than zero and set to L3 offset minus L2 offset. - */ - const uint8_t *ptr; - - /** Outer header length in bytes */ - uint32_t len; - } outer_hdr; - -} odp_ipsec_out_inline_param_t; - -/** - * IPSEC operation result for a packet - */ -typedef struct odp_ipsec_packet_result_t { - /** IPSEC operation status. 
Use this to check if IPSEC operation - * reported any errors or warnings (e.g. status.all != ODP_IPSEC_OK). - */ - odp_ipsec_op_status_t status; - - /** IPSEC operation flags */ - odp_ipsec_op_flag_t flag; - - /** IPSEC SA that was used to create the packet - * - * Operation updates this SA handle value, when SA look up is performed - * as part of the operation and the look up is successful. Operation - * status code indicates if the look up failed. Otherwise, the SA - * provided by the application is copied here. - */ - odp_ipsec_sa_t sa; - - /** Packet outer header status before inbound inline processing. - * This is valid only when outer headers are retained - * (see odp_ipsec_inbound_config_t) and flag.inline_mode is set. - */ - struct { - /** Points to the first byte of retained outer headers. These - * headers are stored in a contiquous, per packet, - * implementation specific memory space. Since the memory space - * may overlap with e.g. packet head/tailroom, the content - * becomes invalid if packet data storage is modified in - * any way. The memory space may not be shareable to other - * threads. */ - uint8_t *ptr; - - /** Outer header length in bytes */ - uint32_t len; - } outer_hdr; - - /** Total IP length of the original ESP or AH packet before IPsec - * decapsulation. This is valid only for inbound inline and async - * processed packets. Zero value means that the length information - * is not available. - * - * If the result packet was reassembled from multiple IPsec - * protected packets, this is the sum of the lengths of all the - * involved IPsec packets. 
- */ - uint32_t orig_ip_len; - -} odp_ipsec_packet_result_t; - -/** - * IPSEC status ID - */ -typedef enum odp_ipsec_status_id_t { - /** Response to SA disable command - * - * Following status event (odp_ipsec_status_t) fields have valid - * content, other fields must be ignored: - * - sa: The SA that was requested to be disabled - * - result: Operation result - */ - ODP_IPSEC_STATUS_SA_DISABLE = 0, - - /** Warning from inline IPSEC processing - * - * Following status event (odp_ipsec_status_t) fields have valid - * content, other fields must be ignored: - * - sa: The SA that caused the warning - * - warn: The warning(s) reported by this event - * - * This status event is generated only for outbound SAs in - * ODP_IPSEC_OP_MODE_INLINE mode. - */ - ODP_IPSEC_STATUS_WARN - -} odp_ipsec_status_id_t; - -/** - * IPSEC status content - */ -typedef struct odp_ipsec_status_t { - /** IPSEC status ID */ - odp_ipsec_status_id_t id; - - /** IPSEC SA that was target of the operation */ - odp_ipsec_sa_t sa; - - /** Result of the operation - * - * 0: Success - * <0: Failure - */ - int result; - - /** Warnings of an ODP_IPSEC_STATUS_WARN status event */ - odp_ipsec_warn_t warn; - -} odp_ipsec_status_t; - /** * Inbound synchronous IPSEC operation * diff --git a/include/odp/api/spec/ipsec_types.h b/include/odp/api/spec/ipsec_types.h new file mode 100644 index 0000000000..b74d26d4ff --- /dev/null +++ b/include/odp/api/spec/ipsec_types.h @@ -0,0 +1,1497 @@ +/* Copyright (c) 2016-2018, Linaro Limited + * Copyright (c) 2022, Nokia + * All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/** + * @file + * + * ODP IPsec API type definitions + */ + +#ifndef ODP_API_SPEC_IPSEC_TYPES_H_ +#define ODP_API_SPEC_IPSEC_TYPES_H_ +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include + +/** @addtogroup odp_ipsec + * @{ + */ + +/** + * @typedef odp_ipsec_sa_t + * IPSEC Security Association (SA) + */ + + /** + * @def ODP_IPSEC_SA_INVALID + * Invalid IPSEC SA + */ + +/** + * IPSEC operation mode + */ +typedef enum odp_ipsec_op_mode_t { + /** Synchronous IPSEC operation + * + * Application uses synchronous IPSEC operations, + * which output all results on function return. + */ + ODP_IPSEC_OP_MODE_SYNC = 0, + + /** Asynchronous IPSEC operation + * + * Application uses asynchronous IPSEC operations, + * which return results via events. + */ + ODP_IPSEC_OP_MODE_ASYNC, + + /** Inline IPSEC operation + * + * Packet input/output is connected directly to IPSEC inbound/outbound + * processing. Application uses asynchronous or inline IPSEC + * operations. + * + * Inline processed inbound packets are delivered to the application + * in the same way as packets processed by odp_ipsec_in_enq(). + */ + ODP_IPSEC_OP_MODE_INLINE, + + /** IPSEC is disabled in inbound / outbound direction */ + ODP_IPSEC_OP_MODE_DISABLED + +} odp_ipsec_op_mode_t; + +/** + * IPSEC TEST SA operation + */ +typedef enum odp_ipsec_test_sa_operation_t { + /** Update next sequence number + * + * The seq_num parameter is an outbound SA specific parameter. + * Invoking the odp_ipsec_test_sa_update() API to update this + * field on an inbound SA will cause the API to return failure. + */ + ODP_IPSEC_TEST_SA_UPDATE_SEQ_NUM = 0, + + /** Update highest authenticated sequence number + * + * The antireplay_window_top parameter is inbound SA specific. + * Invoking the odp_ipsec_test_sa_update() API to update this + * field on an outbound SA will cause the API to return failure. 
+ */ + ODP_IPSEC_TEST_SA_UPDATE_ANTIREPLAY_WINDOW_TOP + +} odp_ipsec_test_sa_operation_t; + +/** + * IPSEC TEST SA parameter + */ +typedef union odp_ipsec_test_sa_param_t { + /** Next sequence number + * + * @see ODP_IPSEC_TEST_SA_UPDATE_SEQ_NUM + */ + uint64_t seq_num; + + /** Highest authenticated sequence number + * + * @see ODP_IPSEC_TEST_SA_UPDATE_ANTIREPLAY_WINDOW_TOP + */ + uint64_t antireplay_window_top; + +} odp_ipsec_test_sa_param_t; + +/** + * Configuration options for IPSEC inbound processing + */ +typedef struct odp_ipsec_inbound_config_t { + /** Default destination queue for IPSEC events + * + * When inbound SA lookup fails in the asynchronous mode, + * resulting IPSEC events are enqueued into this queue. + */ + odp_queue_t default_queue; + + /** Constraints for SPI values used with inbound SA lookup. Minimal + * SPI range and unique values may improve performance. */ + struct { + /** Minimum SPI value for SA lookup. Default value is 0. */ + uint32_t min_spi; + + /** Maximum SPI value for SA lookup. Default value is + * UINT32_MAX. */ + uint32_t max_spi; + + /** Select if SPI values for SA lookup are unique or may contain + * the same SPI value multiple times. The default value is 0. + * + * 0: All SAs in SA lookup have unique SPI value + * 1: The same SPI value may be used for multiple SAs + */ + odp_bool_t spi_overlap; + + } lookup; + + /** Retain outer headers + * + * Select up to which protocol layer (at least) outer headers are + * retained in inbound inline processing. Default value is + * ODP_PROTO_LAYER_NONE. + * + * ODP_PROTO_LAYER_NONE: Application does not require any outer + * headers to be retained. + * + * ODP_PROTO_LAYER_L2: Retain headers up to layer 2. + * + * ODP_PROTO_LAYER_L3: Retain headers up to layer 3, otherwise the + * same as ODP_PROTO_LAYER_ALL. + * + * ODP_PROTO_LAYER_L4: Retain headers up to layer 4, otherwise the + * same as ODP_PROTO_LAYER_ALL. 
+ * + * ODP_PROTO_LAYER_ALL: In tunnel mode, all headers before IPSEC are + * retained. In transport mode, all headers + * before IP (carrying IPSEC) are retained. + * + */ + odp_proto_layer_t retain_outer; + + /** Parse packet headers after IPSEC transformation + * + * Select header parsing level after inbound processing. Headers of the + * resulting packet must be checked (at least) up to this level. + * Parsing starts from IP (layer 3). Packet metadata from IP to this + * layer is set. In addition, offset (and pointer) to the next layer + * is set. Other layer/protocol specific metadata have undefined + * values. + * + * Each successfully transformed packet has a valid value for L3 offset + * regardless of the parse configuration. Default value is + * ODP_PROTO_LAYER_NONE. ODP_PROTO_LAYER_L2 is not a valid value. + */ + odp_proto_layer_t parse_level; + + /** Flags to control IPSEC payload data checks up to the selected parse + * level. Checksum checking status can be queried for each packet with + * odp_packet_l3_chksum_status() and odp_packet_l4_chksum_status(). + * Default value for all bits is 0 (skip all checksum checks). + */ + odp_proto_chksums_t chksums; + + /** Post-IPsec reassembly configuration + * + * This field provides global IPsec configuration parameters for + * fragment reassembly. The enable flag does not turn on reassembly + * but tells if reassembly may be enabled in SA parameters. + * + * The enable flag may be set only if retain_outer is + * ODP_PROTO_LAYER_NONE. + */ + odp_reass_config_t reassembly; + + /** Attempt reassembly after inbound IPsec processing in + * odp_ipsec_in_enq(). Default value is false. + */ + odp_bool_t reass_async; + + /** Attempt reassembly after inline inbound IPsec processing. + * Default value is false. 
+ **/ + odp_bool_t reass_inline; + +} odp_ipsec_inbound_config_t; + +/** + * Configuration options for IPSEC outbound processing + */ +typedef struct odp_ipsec_outbound_config_t { + /** Flags to control L3/L4 checksum insertion as part of outbound + * packet processing. These flags control checksum insertion (for the + * payload packet) in the same way as the checksum flags in + * odp_pktout_config_opt_t control checksum insertion when sending + * packets out through a pktio interface. Also packet checksum override + * functions (e.g. odp_packet_l4_chksum_insert()) can be used in + * the same way. + */ + union { + /** Mapping for individual bits */ + struct { + /** Insert IPv4 header checksum on the payload packet + * before IPSEC transformation. Default value is 0. */ + uint32_t inner_ipv4 : 1; + + /** Insert UDP header checksum on the payload packet + * before IPSEC transformation. Default value is 0. */ + uint32_t inner_udp : 1; + + /** Insert TCP header checksum on the payload packet + * before IPSEC transformation. Default value is 0. */ + uint32_t inner_tcp : 1; + + /** Insert SCTP header checksum on the payload packet + * before IPSEC transformation. Default value is 0. */ + uint32_t inner_sctp : 1; + + } chksum; + + /** All bits of the bit field structure + * + * This field can be used to set/clear all flags, or bitwise + * operations over the entire structure. 
*/ + uint32_t all_chksum; + }; + +} odp_ipsec_outbound_config_t; + +/** + * IPSEC TEST capability + */ +typedef struct odp_ipsec_test_capability_t { + /** Parameters supported for sa_update */ + struct { + /** Next sequence number value + * + * @see ODP_IPSEC_TEST_SA_UPDATE_SEQ_NUM + */ + odp_bool_t seq_num; + + /** Highest authenticated sequence number + * + * @see ODP_IPSEC_TEST_SA_UPDATE_ANTIREPLAY_WINDOW_TOP + */ + odp_bool_t antireplay_window_top; + + } sa_operations; + +} odp_ipsec_test_capability_t; + +/** + * IPSEC capability + */ +typedef struct odp_ipsec_capability_t { + /** Maximum number of IPSEC SAs */ + uint32_t max_num_sa; + + /** Synchronous IPSEC operation mode (ODP_IPSEC_OP_MODE_SYNC) support */ + odp_support_t op_mode_sync; + + /** + * Asynchronous IPSEC operation mode (ODP_IPSEC_OP_MODE_ASYNC) support + */ + odp_support_t op_mode_async; + + /** + * Inline inbound IPSEC operation mode (ODP_IPSEC_OP_MODE_INLINE) + * support + */ + odp_support_t op_mode_inline_in; + + /** + * Inline outgoing IPSEC operation mode (ODP_IPSEC_OP_MODE_INLINE) + * support + */ + odp_support_t op_mode_inline_out; + + /** IP Authenticated Header (ODP_IPSEC_AH) support */ + odp_support_t proto_ah; + + /** Fragment after IPsec support */ + odp_support_t frag_after; + + /** Fragment before IPsec support */ + odp_support_t frag_before; + + /** + * Support of pipelined classification (ODP_IPSEC_PIPELINE_CLS) of + * resulting inbound packets + */ + odp_support_t pipeline_cls; + + /** + * Support of retaining outer headers (retain_outer) in inbound inline + * processed packets + */ + odp_support_t retain_header; + + /** + * Inner packet checksum check offload support in inbound direction. + */ + odp_proto_chksums_t chksums_in; + + /** Maximum number of different destination CoSes in classification + * pipelining. The same CoS may be used for many SAs. This is equal or + * less than 'max_cos' capability in classifier API. 
+ */ + uint32_t max_cls_cos; + + /** + * Scheduled queue support + * + * 0: Scheduled queues are not supported either as IPsec SA destination + * queues or as IPsec default queue + * 1: Scheduled queues are supported as both IPsec SA destination queues + * and IPsec default queue + * @see odp_ipsec_sa_param_t + */ + odp_bool_t queue_type_sched; + + /** + * Plain queue support + * + * 0: Plain queues are not supported either as IPsec SA destination + * queues or as IPsec default queue + * 1: Plain queues are supported as both IPsec SA destination queues and + * IPsec default queue + * @see odp_ipsec_sa_param_t + */ + odp_bool_t queue_type_plain; + + /** Maximum number of different destination queues. The same queue may + * be used for many SAs. */ + uint32_t max_queues; + + /** Support for returning completion packets as vectors */ + odp_pktin_vector_capability_t vector; + + /** Maximum anti-replay window size. */ + uint32_t max_antireplay_ws; + + /** Supported cipher algorithms */ + odp_crypto_cipher_algos_t ciphers; + + /** Supported authentication algorithms */ + odp_crypto_auth_algos_t auths; + + /** Support of traffic manager (TM) after inline outbound IPSEC + * processing. On unsupported platforms, application is not allowed + * to use a TM enabled pktio (ODP_PKTOUT_MODE_TM) with outbound + * inline IPSEC. 
+ * + * @see odp_pktio_open(), odp_pktio_param_t + */ + odp_support_t inline_ipsec_tm; + + /** IPSEC TEST capabilities + * + * @see odp_ipsec_test_sa_update() + */ + odp_ipsec_test_capability_t test; + + /** Post-IPsec reassembly capability */ + odp_reass_capability_t reassembly; + + /** Support of reassembly after inbound processing in odp_ipsec_in_enq() */ + odp_bool_t reass_async; + + /** Support of reassembly after inline inbound IPsec processing */ + odp_bool_t reass_inline; + +} odp_ipsec_capability_t; + +/** + * Cipher algorithm capabilities + */ +typedef struct odp_ipsec_cipher_capability_t { + /** Key length in bytes */ + uint32_t key_len; + +} odp_ipsec_cipher_capability_t; + +/** + * Authentication algorithm capabilities + */ +typedef struct odp_ipsec_auth_capability_t { + /** Key length in bytes */ + uint32_t key_len; + + /** ICV length in bytes */ + uint32_t icv_len; +} odp_ipsec_auth_capability_t; + +/** + * IPSEC configuration options + */ +typedef struct odp_ipsec_config_t { + /** Inbound IPSEC operation mode. Application selects which mode + * will be used for inbound IPSEC operations. + * + * @see odp_ipsec_in(), odp_ipsec_in_enq() + */ + odp_ipsec_op_mode_t inbound_mode; + + /** Outbound IPSEC operation mode. Application selects which mode + * will be used for outbound IPSEC operations. + * + * @see odp_ipsec_out(), odp_ipsec_out_enq(), odp_ipsec_out_inline() + */ + odp_ipsec_op_mode_t outbound_mode; + + /** Maximum number of IPSEC SAs that application will use + * simultaneously */ + uint32_t max_num_sa; + + /** IPSEC inbound processing configuration */ + odp_ipsec_inbound_config_t inbound; + + /** IPSEC outbound processing configuration */ + odp_ipsec_outbound_config_t outbound; + + /** Enable stats collection + * + * Default value is false (stats collection disabled). 
+ * + * @see odp_ipsec_stats(), odp_ipsec_stats_multi() + */ + odp_bool_t stats_en; + + /** + * Packet vector configuration for async and inline operations + * + * This packet vector configuration affects packets delivered to + * the application through the default queue and the SA destination + * queues. It does not affect packets delivered through pktio + * input queues. + */ + odp_pktin_vector_config_t vector; + +} odp_ipsec_config_t; + +/** + * IPSEC SA direction + */ +typedef enum odp_ipsec_dir_t { + /** Inbound IPSEC SA */ + ODP_IPSEC_DIR_INBOUND = 0, + + /** Outbound IPSEC SA */ + ODP_IPSEC_DIR_OUTBOUND + +} odp_ipsec_dir_t; + +/** + * IPSEC protocol mode + */ +typedef enum odp_ipsec_mode_t { + /** IPSEC tunnel mode */ + ODP_IPSEC_MODE_TUNNEL = 0, + + /** IPSEC transport mode */ + ODP_IPSEC_MODE_TRANSPORT + +} odp_ipsec_mode_t; + +/** + * IPSEC protocol + */ +typedef enum odp_ipsec_protocol_t { + /** ESP protocol */ + ODP_IPSEC_ESP = 0, + + /** AH protocol */ + ODP_IPSEC_AH + +} odp_ipsec_protocol_t; + +/** + * IPSEC tunnel type + */ +typedef enum odp_ipsec_tunnel_type_t { + /** Outer header is IPv4 */ + ODP_IPSEC_TUNNEL_IPV4 = 0, + + /** Outer header is IPv6 */ + ODP_IPSEC_TUNNEL_IPV6 + +} odp_ipsec_tunnel_type_t; + +/** + * IPSEC crypto parameters + */ +typedef struct odp_ipsec_crypto_param_t { + /** Cipher algorithm + * + * Select cipher algorithm to be used. ODP_CIPHER_ALG_NULL indicates + * that ciphering is disabled. See 'ciphers' field of + * odp_ipsec_capability_t for supported cipher algorithms. Algorithm + * descriptions can be found from odp_cipher_alg_t documentation. Note + * that some algorithms restrict choice of the pairing authentication + * algorithm. When ciphering is enabled, cipher key and potential extra + * key material (cipher_key_extra) need to be set. The default value + * is ODP_CIPHER_ALG_NULL. 
+ */ + odp_cipher_alg_t cipher_alg; + + /** Cipher key */ + odp_crypto_key_t cipher_key; + + /** Extra keying material for cipher algorithm + * + * Additional data used as salt or nonce if the algorithm requires it, + * other algorithms ignore this field. These algorithms require this + * field to be set: + * - ODP_CIPHER_ALG_AES_CTR: 4 bytes of nonce + * - ODP_CIPHER_ALG_AES_GCM: 4 bytes of salt + * - ODP_CIPHER_ALG_AES_CCM: 3 bytes of salt + * - ODP_CIPHER_ALG_CHACHA20_POLY1305: 4 bytes of salt + */ + odp_crypto_key_t cipher_key_extra; + + /** Authentication algorithm + * + * Select authentication algorithm to be used. ODP_AUTH_ALG_NULL + * indicates that authentication is disabled. See 'auths' field of + * odp_ipsec_capability_t for supported authentication algorithms. + * Algorithm descriptions can be found from odp_auth_alg_t + * documentation. Note that some algorithms restrict choice of the + * pairing cipher algorithm. When single algorithm provides both + * ciphering and authentication (i.e. Authenticated Encryption), + * authentication side key information ('auth_key' and + * 'auth_key_extra') is ignored, and cipher side values are + * used instead. These algorithms ignore authentication side key + * information: ODP_AUTH_ALG_AES_GCM, ODP_AUTH_ALG_AES_CCM and + * ODP_AUTH_ALG_CHACHA20_POLY1305. Otherwise, authentication side + * parameters must be set when authentication is enabled. The default + * value is ODP_AUTH_ALG_NULL. + */ + odp_auth_alg_t auth_alg; + + /** Authentication key */ + odp_crypto_key_t auth_key; + + /** Extra keying material for authentication algorithm + * + * Additional data used as salt or nonce if the algorithm requires it, + * other algorithms ignore this field. These algorithms require this + * field to be set: + * - ODP_AUTH_ALG_AES_GMAC: 4 bytes of salt + */ + odp_crypto_key_t auth_key_extra; + + /** + * Length of integrity check value (ICV) in bytes. + * + * Some algorithms support multiple ICV lengths when used with IPsec. 
+ * This field can be used to select a non-default ICV length. + * + * Zero value indicates that the default ICV length shall be used. + * The default length depends on the selected algorithm as follows: + * + * Algorithm Default length Other lengths + * ---------------------------------------------------------------- + * ODP_AUTH_ALG_NULL 0 + * ODP_AUTH_ALG_MD5_HMAC 12 + * ODP_AUTH_ALG_SHA1_HMAC 12 + * ODP_AUTH_ALG_SHA256_HMAC 16 + * ODP_AUTH_ALG_SHA384_HMAC 24 + * ODP_AUTH_ALG_SHA512_HMAC 32 + * ODP_AUTH_ALG_AES_GCM 16 8, 12 + * ODP_AUTH_ALG_AES_GMAC 16 + * ODP_AUTH_ALG_AES_CCM 16 8, 12 + * ODP_AUTH_ALG_AES_CMAC 12 + * ODP_AUTH_ALG_AES_XCBC_MAC 12 + * ODP_AUTH_ALG_CHACHA20_POLY1305 16 + * + * The requested ICV length must be supported for the selected + * algorithm as indicated by odp_ipsec_auth_capability(). + * + * The default value is 0. + */ + uint32_t icv_len; + +} odp_ipsec_crypto_param_t; + +/** IPv4 header parameters */ +typedef struct odp_ipsec_ipv4_param_t { + /** IPv4 source address (NETWORK ENDIAN) */ + void *src_addr; + + /** IPv4 destination address (NETWORK ENDIAN) */ + void *dst_addr; + + /** IPv4 Differentiated Services Code Point. The default value is 0. */ + uint8_t dscp; + + /** IPv4 Don't Fragment bit. The default value is 0. */ + uint8_t df; + + /** IPv4 Time To Live. The default value is 255. */ + uint8_t ttl; + +} odp_ipsec_ipv4_param_t; + +/** IPv6 header parameters */ +typedef struct odp_ipsec_ipv6_param_t { + /** IPv6 source address (NETWORK ENDIAN) */ + void *src_addr; + + /** IPv6 destination address (NETWORK ENDIAN) */ + void *dst_addr; + + /** IPv6 flow label. The default value is 0. */ + uint32_t flabel; + + /** IPv6 Differentiated Services Code Point. The default value is 0. */ + uint8_t dscp; + + /** IPv6 hop limit. The default value is 255. */ + uint8_t hlimit; + +} odp_ipsec_ipv6_param_t; + +/** + * IPSEC tunnel parameters + * + * These parameters are used to build outbound tunnel headers. 
All values are + * passed in CPU native byte / bit order if not specified otherwise. + * IP addresses must be in NETWORK byte order as those are passed in with + * pointers and copied byte-by-byte from memory to the packet. + */ +typedef struct odp_ipsec_tunnel_param_t { + /** Tunnel type: IPv4 or IPv6. The default is IPv4. */ + odp_ipsec_tunnel_type_t type; + + /** Tunnel type specific parameters */ + struct { + /** IPv4 header parameters */ + odp_ipsec_ipv4_param_t ipv4; + + /** IPv6 header parameters */ + odp_ipsec_ipv6_param_t ipv6; + }; +} odp_ipsec_tunnel_param_t; + +/** + * IPSEC SA option flags + */ +typedef struct odp_ipsec_sa_opt_t { + /** Extended Sequence Numbers (ESN) + * + * * 1: Use extended (64 bit) sequence numbers + * * 0: Use normal sequence numbers (the default value) + */ + uint32_t esn : 1; + + /** UDP encapsulation + * + * * 1: Do UDP encapsulation/decapsulation so that IPSEC packets can + * traverse through NAT boxes. + * * 0: No UDP encapsulation (the default value) + */ + uint32_t udp_encap : 1; + + /** Copy DSCP bits + * + * * 1: Copy IPv4 or IPv6 DSCP bits from inner IP header to + * the outer IP header in encapsulation, and vice versa in + * decapsulation. + * * 0: Use values from odp_ipsec_tunnel_param_t in encapsulation and + * do not change DSCP field in decapsulation (the default value). + */ + uint32_t copy_dscp : 1; + + /** Copy IPv6 Flow Label + * + * * 1: Copy IPv6 flow label from inner IPv6 header to the + * outer IPv6 header. + * * 0: Use value from odp_ipsec_tunnel_param_t (the default value) + */ + uint32_t copy_flabel : 1; + + /** Copy IPv4 Don't Fragment bit + * + * * 1: Copy the DF bit from the inner IPv4 header to the outer + * IPv4 header. 
+ * * 0: Use value from odp_ipsec_tunnel_param_t (the default value) + */ + uint32_t copy_df : 1; + + /** Decrement inner packet Time To Live (TTL) field + * + * * 1: In tunnel mode, decrement inner packet IPv4 TTL or + * IPv6 Hop Limit after tunnel decapsulation, or before tunnel + * encapsulation. + * * 0: Inner packet is not modified (the default value) + */ + uint32_t dec_ttl : 1; + +} odp_ipsec_sa_opt_t; + +/** + * IPSEC SA lifetime limits + * + * These limits are used for setting up SA lifetime. IPSEC operations check + * against the limits and output a status code (e.g. soft_exp_bytes) when + * a limit is crossed. It's implementation defined how many times soft + * lifetime expiration is reported: only once, first N or all packets following + * the limit crossing. Any number of limits may be used simultaneously. + * Use zero when there is no limit. + * + * The default value is zero (i.e. no limit) for all the limits. + */ +typedef struct odp_ipsec_lifetime_t { + /** Soft expiry limits for the session */ + struct { + /** Limit in bytes */ + uint64_t bytes; + + /** Limit in packet */ + uint64_t packets; + } soft_limit; + + /** Hard expiry limits for the session */ + struct { + /** Limit in bytes */ + uint64_t bytes; + + /** Limit in packet */ + uint64_t packets; + } hard_limit; +} odp_ipsec_lifetime_t; + +/** + * Fragmentation mode + * + * These options control outbound IP packet fragmentation offload. When offload + * is enabled, IPSEC operation will determine if fragmentation is needed and + * does it according to the mode. + */ +typedef enum odp_ipsec_frag_mode_t { + /** Do not fragment IP packets */ + ODP_IPSEC_FRAG_DISABLED = 0, + + /** Fragment IP packet before IPSEC operation */ + ODP_IPSEC_FRAG_BEFORE, + + /** Fragment IP packet after IPSEC operation */ + ODP_IPSEC_FRAG_AFTER, + + /** Only check if IP fragmentation is needed, + * do not fragment packets. 
*/ + ODP_IPSEC_FRAG_CHECK +} odp_ipsec_frag_mode_t; + +/** + * Packet lookup mode + * + * Lookup mode controls how an SA participates in SA lookup offload. + * Inbound operations perform SA lookup if application does not provide a SA as + * a parameter. In inline mode, a lookup miss directs the packet back to normal + * packet input interface processing. SA lookup failure status + * (status.error.sa_lookup) is reported through odp_ipsec_packet_result_t. + */ +typedef enum odp_ipsec_lookup_mode_t { + /** Inbound SA lookup is disabled for the SA. */ + ODP_IPSEC_LOOKUP_DISABLED = 0, + + /** Inbound SA lookup is enabled. Lookup matches only SPI value. */ + ODP_IPSEC_LOOKUP_SPI, + + /** Inbound SA lookup is enabled. Lookup matches both SPI value and + * destination IP address. Functionality is otherwise identical to + * ODP_IPSEC_LOOKUP_SPI. */ + ODP_IPSEC_LOOKUP_DSTADDR_SPI + +} odp_ipsec_lookup_mode_t; + +/** + * IPSEC pipeline configuration + */ +typedef enum odp_ipsec_pipeline_t { + /** Do not pipeline. Send all resulting events to the application. */ + ODP_IPSEC_PIPELINE_NONE = 0, + + /** Send resulting packets to the classifier + * + * IPSEC capability 'pipeline_cls' determines if pipelined + * classification is supported. */ + ODP_IPSEC_PIPELINE_CLS + +} odp_ipsec_pipeline_t; + +/** + * IPSEC header type + */ +typedef enum odp_ipsec_ip_version_t { + /** Header is IPv4 */ + ODP_IPSEC_IPV4 = 4, + + /** Header is IPv6 */ + ODP_IPSEC_IPV6 = 6 + +} odp_ipsec_ip_version_t; + +/** + * IPSEC Security Association (SA) parameters + */ +typedef struct odp_ipsec_sa_param_t { + /** IPSEC SA direction: inbound or outbound */ + odp_ipsec_dir_t dir; + + /** IPSEC protocol: ESP or AH. The default value is ODP_IPSEC_ESP. 
*/ + odp_ipsec_protocol_t proto; + + /** IPSEC protocol mode: transport or tunnel */ + odp_ipsec_mode_t mode; + + /** Parameters for crypto and authentication algorithms */ + odp_ipsec_crypto_param_t crypto; + + /** Various SA option flags */ + odp_ipsec_sa_opt_t opt; + + /** SA lifetime parameters */ + odp_ipsec_lifetime_t lifetime; + + /** SPI value */ + uint32_t spi; + + /** Destination queue for IPSEC events + * + * Operations in asynchronous or inline mode enqueue resulting events + * into this queue. The default queue ('default_queue') is used when + * SA is not known. + */ + odp_queue_t dest_queue; + + /** User defined SA context pointer + * + * User defined context pointer associated with the SA. + * The implementation may prefetch the context data. Default value + * of the pointer is NULL. + */ + void *context; + + /** Context data length + * + * User defined context data length in bytes for prefetching. + * The implementation may use this value as a hint for the number of + * context data bytes to prefetch. Default value is zero (no hint). + */ + uint32_t context_len; + + /** IPSEC SA direction dependent parameters */ + struct { + /** Inbound specific parameters */ + struct { + /** SA lookup mode + * The default value is ODP_IPSEC_LOOKUP_DISABLED. + */ + odp_ipsec_lookup_mode_t lookup_mode; + + /** Additional SA lookup parameters. Values are + * considered only in ODP_IPSEC_LOOKUP_DSTADDR_SPI + * lookup mode. */ + struct { + /** Select IP version */ + odp_ipsec_ip_version_t ip_version; + + /** IP destination address (NETWORK ENDIAN) to + * be matched in addition to SPI value. */ + void *dst_addr; + + } lookup_param; + + /** Minimum anti-replay window size. Use 0 to disable + * anti-replay service. The default value is 0. + */ + uint32_t antireplay_ws; + + /** Select pipelined destination for resulting events + * + * Asynchronous and inline modes generate events. + * Select where those events are sent. 
Inbound SAs may + * choose to use pipelined classification. The default + * value is ODP_IPSEC_PIPELINE_NONE. + */ + odp_ipsec_pipeline_t pipeline; + + /** Classifier destination CoS for resulting packets + * + * Successfully decapsulated packets are sent to + * classification through this CoS. Other resulting + * events are sent to 'dest_queue'. This field is + * considered only when 'pipeline' is + * ODP_IPSEC_PIPELINE_CLS. The CoS must not be shared + * between any pktio interface default CoS. The maximum + * number of different CoS supported is defined by + * IPSEC capability max_cls_cos. + */ + odp_cos_t dest_cos; + + /** Enable reassembly of IPsec tunneled fragments + * + * Attempt reassembly of fragments after IPsec tunnel + * decapsulation. + * + * Reassembly is attempted for inline or asynchronously + * processed packets, not for packets processed using + * the synchronous API function. + * + * Fragments received through different SAs will not be + * reassembled into the same packet. + * + * IPsec statistics reflect IPsec processing before + * reassembly and thus count all individual fragments. + * + * Reassembly may be enabled for an SA only if + * reassembly was enabled in the global IPsec + * configuration. + * + * Default value is false. + * + * @see odp_ipsec_config() + * + */ + odp_bool_t reassembly_en; + + } inbound; + + /** Outbound specific parameters */ + struct { + /** Parameters for tunnel mode */ + odp_ipsec_tunnel_param_t tunnel; + + /** Fragmentation mode + * The default value is ODP_IPSEC_FRAG_DISABLED. + */ + odp_ipsec_frag_mode_t frag_mode; + + /** MTU for outbound IP fragmentation offload + * + * This is the maximum length of IP packets that + * outbound IPSEC operations may produce. The value may + * be updated later with odp_ipsec_sa_mtu_update(). 
+ */ + uint32_t mtu; + + } outbound; + }; + +} odp_ipsec_sa_param_t; + +/** + * IPSEC stats content + */ +typedef struct odp_ipsec_stats_t { + /** Number of packets processed successfully */ + uint64_t success; + + /** Number of packets with protocol errors */ + uint64_t proto_err; + + /** Number of packets with authentication errors */ + uint64_t auth_err; + + /** Number of packets with antireplay check failures */ + uint64_t antireplay_err; + + /** Number of packets with algorithm errors */ + uint64_t alg_err; + + /** Number of packes with MTU errors */ + uint64_t mtu_err; + + /** Number of packets with hard lifetime(bytes) expired */ + uint64_t hard_exp_bytes_err; + + /** Number of packets with hard lifetime(packets) expired */ + uint64_t hard_exp_pkts_err; + + /** Total bytes of packet data processed by IPsec SA in success cases + * + * The range of packet bytes included in the success_bytes count is + * implementation defined but includes at least the bytes input for + * encryption or bytes output after decryption in ESP or the bytes + * authenticated in AH. + */ + uint64_t success_bytes; +} odp_ipsec_stats_t; + +/** + * IPSEC SA information + */ +typedef struct odp_ipsec_sa_info_t { + /** IPsec SA parameters + * + * This is not necessarily an exact copy of the actual parameter + * structure used in SA creation. The fields that were relevant + * for the SA in the creation phase will have the same values, + * but other fields, such as tunnel parameters for a transport + * mode SA, will have undefined values. + */ + odp_ipsec_sa_param_t param; + + /** IPSEC SA direction dependent parameters */ + union { + /** Inbound specific parameters */ + struct { + /** Additional SA lookup parameters. */ + struct { + /** IP destination address (NETWORK ENDIAN) to + * be matched in addition to SPI value. */ + uint8_t dst_addr[ODP_IPV6_ADDR_SIZE]; + } lookup_param; + + /** Antireplay window size + * + * Antireplay window size configured for the SA. 
+ * This value can be different from what application + * had requested. + */ + uint32_t antireplay_ws; + + /** Antireplay window top + * + * Sequence number representing a recent top of the + * anti-replay window. There may be a delay before the + * SA state is reflected in the value. The value will be + * zero if no packets have been processed or if the + * anti-replay service is not enabled. + */ + uint64_t antireplay_window_top; + } inbound; + + /** Outbound specific parameters */ + struct { + /** Sequence number + * + * Sequence number used for a recently processed packet. + * There may be a delay before the SA state is reflected + * in the value. When no packets have been processed, + * the value will be zero. + */ + uint64_t seq_num; + + /** Tunnel IP address */ + union { + /** IPv4 */ + struct { + /** IPv4 source address */ + uint8_t src_addr[ODP_IPV4_ADDR_SIZE]; + /** IPv4 destination address */ + uint8_t dst_addr[ODP_IPV4_ADDR_SIZE]; + } ipv4; + + /** IPv6 */ + struct { + /** IPv6 source address */ + uint8_t src_addr[ODP_IPV6_ADDR_SIZE]; + /** IPv6 destination address */ + uint8_t dst_addr[ODP_IPV6_ADDR_SIZE]; + } ipv6; + } tunnel; + } outbound; + }; +} odp_ipsec_sa_info_t; + +/** IPSEC operation status has no errors */ +#define ODP_IPSEC_OK 0 + +/** IPSEC errors */ +typedef struct odp_ipsec_error_t { + /** IPSEC errors */ + union { + /** Error bits */ + struct { + /** Protocol error. Not a valid ESP or AH packet, + * packet data length error, etc. 
*/ + uint32_t proto : 1; + + /** SA lookup failed */ + uint32_t sa_lookup : 1; + + /** Authentication failed */ + uint32_t auth : 1; + + /** Anti-replay check failed */ + uint32_t antireplay : 1; + + /** Other algorithm error */ + uint32_t alg : 1; + + /** Packet does not fit into the given MTU size */ + uint32_t mtu : 1; + + /** Hard lifetime expired: bytes */ + uint32_t hard_exp_bytes : 1; + + /** Hard lifetime expired: packets */ + uint32_t hard_exp_packets : 1; + }; + + /** All error bits + * + * This field can be used to set, clear or compare + * multiple bits. For example, 'status.error.all != 0' + * checks if there are any errors. + */ + uint32_t all; + }; + +} odp_ipsec_error_t; + +/** IPSEC warnings */ +typedef struct odp_ipsec_warn_t { + /** IPSEC warnings */ + union { + /** Warning bits */ + struct { + /** Soft lifetime expired: bytes */ + uint32_t soft_exp_bytes : 1; + + /** Soft lifetime expired: packets */ + uint32_t soft_exp_packets : 1; + }; + + /** All warning bits + * + * This field can be used to set/clear all bits, or to perform + * bitwise operations over those. */ + uint32_t all; + }; + +} odp_ipsec_warn_t; + +/** IPSEC operation status */ +typedef struct odp_ipsec_op_status_t { + /** IPSEC status bits */ + union { + /** IPSEC errors and warnings */ + struct { + /** IPSEC errors */ + odp_ipsec_error_t error; + + /** IPSEC warnings */ + odp_ipsec_warn_t warn; + }; + + /** All status bits. Combines all error and warning bits. + * For example, 'status.all != ODP_IPSEC_OK' checks if there + * are any errors or warnings. */ + uint64_t all; + + }; + +} odp_ipsec_op_status_t; + +/** IPSEC operation flags */ +typedef struct odp_ipsec_op_flag_t { + /** IPSEC operations flags */ + union { + /** Operation flags */ + struct { + /** Packet was processed in inline mode */ + uint32_t inline_mode : 1; + + }; + + /** All flag bits + * + * This field can be used to set/clear all flags, or to perform + * bitwise operations over those. 
*/ + uint32_t all; + }; + +} odp_ipsec_op_flag_t; + +/** + * IPSEC outbound operation options + * + * These may be used to override some SA level options + */ +typedef struct odp_ipsec_out_opt_t { + /** Union of all flag bits */ + union { + /** Option flags. Set flag for those options that are + * used, all other options are ignored. */ + struct { + /** Use fragmentation mode option */ + uint32_t frag_mode: 1; + + /** Use TFC padding length option */ + uint32_t tfc_pad: 1; + + /** Tunnel mode TFC dummy packet. This can be used only + * in tunnel mode. When the flag is set, packet length + * and content is ignored and instead a TFC dummy + * packet is created during IPSEC operation. The dummy + * packet length is defined by 'tfc_pad_len' option. + * If the SA is configured to copy IP header fields + * from inner IP packet, those fields must be passed + * with IP parameters option. */ + uint32_t tfc_dummy: 1; + + /** Use IP parameters option */ + uint32_t ip_param: 1; + + } flag; + + /** All flag bits + * + * This field can be used to set/clear all flags, or to perform + * bitwise operations over those. */ + uint32_t all_flags; + }; + + /** Fragmentation mode */ + odp_ipsec_frag_mode_t frag_mode; + + /** TFC padding length + * + * Number of TFC padding bytes added to the packet during IPSEC + * processing. Resulting packet should not exceed the maximum packet + * length of the pool, otherwise IPSEC operation may fail. + * Implementation guarantees that the padding does not contain any + * confidential information. */ + uint32_t tfc_pad_len; + + /** Union of IP parameters */ + union { + /** Override IPv4 parameters in outer header creation. + * IP addresses are ignored. */ + odp_ipsec_ipv4_param_t ipv4; + + /** Override IPv6 parameters in outer header creation. + * IP addresses are ignored. 
*/ + odp_ipsec_ipv6_param_t ipv6; + }; + +} odp_ipsec_out_opt_t; + +/** + * IPSEC outbound operation parameters + */ +typedef struct odp_ipsec_out_param_t { + /** Number of SAs + * + * Outbound IPSEC operation needs SA from application. Use either + * single SA for all packets, or a SA per packet. + * + * Valid values are: + * - 1: Single SA for all packets + * - N: A SA per packet. N must match the number of packets. + */ + int num_sa; + + /** Number of outbound operation options + * + * Valid values are: + * - 0: No options + * - 1: Single option for all packets + * - N: An option per packet. N must match the number of packets. + */ + int num_opt; + + /** Pointer to an array of IPSEC SAs */ + const odp_ipsec_sa_t *sa; + + /** Pointer to an array of outbound operation options + * + * May be NULL when num_opt is zero. + */ + const odp_ipsec_out_opt_t *opt; + +} odp_ipsec_out_param_t; + +/** + * IPSEC inbound operation parameters + */ +typedef struct odp_ipsec_in_param_t { + /** Number of SAs + * + * Inbound IPSEC operation processes a packet using the SA provided by + * the application. If the application does not provide an SA, the + * operation searches for the SA by matching the input packet with all + * inbound SAs according to the lookup mode (odp_ipsec_lookup_mode_t) + * configured in each SA. When passing SAs, use either single SA for + * all packets, or a SA per packet. + * + * Valid values are: + * - 0: No SAs. SA lookup is done for all packets. + * - 1: Single SA for all packets + * - N: A SA per packet. N must match the number of packets. + */ + int num_sa; + + /** Pointer to an array of IPSEC SAs + * + * May be NULL when num_sa is zero. 
+ */ + const odp_ipsec_sa_t *sa; + +} odp_ipsec_in_param_t; + +/** + * Outbound inline IPSEC operation parameters + */ +typedef struct odp_ipsec_out_inline_param_t { + /** Packet output interface for inline outbound operation without TM + * + * Outbound inline IPSEC operation uses this packet IO interface to + * output the packet after a successful IPSEC transformation. The pktio + * must have been configured to operate in inline IPSEC mode. + * + * The pktio must not have been configured with ODP_PKTOUT_MODE_TM. + * For IPSEC inline output to TM enabled interfaces set this field + * to ODP_PKTIO_INVALID and specify the TM queue to be used through + * the tm_queue parameter. Inline IPSEC output through TM can be + * done only if the platform has inline_ipsec_tm capability. + */ + odp_pktio_t pktio; + + /** TM queue for inline outbound operation + * + * TM queue to be used for inline IPSEC output when pktio field + * is ODP_PKTIO_INVALID, indicating use of TM. Otherwise ignored. + * + * @see odp_ipsec_capability() + */ + odp_tm_queue_t tm_queue; + + /** Outer headers for inline output operation + * + * Outbound inline IPSEC operation uses this information to prepend + * outer headers to the IPSEC packet before sending it out. + */ + struct { + /** Points to first byte of outer headers to be copied in + * front of the outgoing IPSEC packet. Implementation copies + * the headers during odp_ipsec_out_inline() call. + * + * Null value indicates that the outer headers are in the + * packet data, starting at L2 offset and ending at the byte + * before L3 offset. In this case, value of 'len' field must + * be greater than zero and set to L3 offset minus L2 offset. + */ + const uint8_t *ptr; + + /** Outer header length in bytes */ + uint32_t len; + } outer_hdr; + +} odp_ipsec_out_inline_param_t; + +/** + * IPSEC operation result for a packet + */ +typedef struct odp_ipsec_packet_result_t { + /** IPSEC operation status. 
Use this to check if IPSEC operation + * reported any errors or warnings (e.g. status.all != ODP_IPSEC_OK). + */ + odp_ipsec_op_status_t status; + + /** IPSEC operation flags */ + odp_ipsec_op_flag_t flag; + + /** IPSEC SA that was used to create the packet + * + * Operation updates this SA handle value, when SA look up is performed + * as part of the operation and the look up is successful. Operation + * status code indicates if the look up failed. Otherwise, the SA + * provided by the application is copied here. + */ + odp_ipsec_sa_t sa; + + /** Packet outer header status before inbound inline processing. + * This is valid only when outer headers are retained + * (see odp_ipsec_inbound_config_t) and flag.inline_mode is set. + */ + struct { + /** Points to the first byte of retained outer headers. These + * headers are stored in a contiguous, per packet, + * implementation specific memory space. Since the memory space + * may overlap with e.g. packet head/tailroom, the content + * becomes invalid if packet data storage is modified in + * any way. The memory space may not be shareable to other + * threads. */ + uint8_t *ptr; + + /** Outer header length in bytes */ + uint32_t len; + } outer_hdr; + + /** Total IP length of the original ESP or AH packet before IPsec + * decapsulation. This is valid only for inbound inline and async + * processed packets. Zero value means that the length information + * is not available. + * + * If the result packet was reassembled from multiple IPsec + * protected packets, this is the sum of the lengths of all the + * involved IPsec packets. 
+ */ + uint32_t orig_ip_len; + +} odp_ipsec_packet_result_t; + +/** + * IPSEC status ID + */ +typedef enum odp_ipsec_status_id_t { + /** Response to SA disable command + * + * Following status event (odp_ipsec_status_t) fields have valid + * content, other fields must be ignored: + * - sa: The SA that was requested to be disabled + * - result: Operation result + */ + ODP_IPSEC_STATUS_SA_DISABLE = 0, + + /** Warning from inline IPSEC processing + * + * Following status event (odp_ipsec_status_t) fields have valid + * content, other fields must be ignored: + * - sa: The SA that caused the warning + * - warn: The warning(s) reported by this event + * + * This status event is generated only for outbound SAs in + * ODP_IPSEC_OP_MODE_INLINE mode. + */ + ODP_IPSEC_STATUS_WARN + +} odp_ipsec_status_id_t; + +/** + * IPSEC status content + */ +typedef struct odp_ipsec_status_t { + /** IPSEC status ID */ + odp_ipsec_status_id_t id; + + /** IPSEC SA that was target of the operation */ + odp_ipsec_sa_t sa; + + /** Result of the operation + * + * 0: Success + * <0: Failure + */ + int result; + + /** Warnings of an ODP_IPSEC_STATUS_WARN status event */ + odp_ipsec_warn_t warn; + +} odp_ipsec_status_t; + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#include +#endif diff --git a/include/odp/api/spec/packet.h b/include/odp/api/spec/packet.h index a8d4caa8c7..51c7a8e46e 100644 --- a/include/odp/api/spec/packet.h +++ b/include/odp/api/spec/packet.h @@ -1685,6 +1685,11 @@ odp_proto_l2_type_t odp_packet_l2_type(odp_packet_t pkt); * * Returns layer 3 protocol type. Initial type value is ODP_PROTO_L3_TYPE_NONE. * + * In addition to protocol types specified in ODP_PROTO_L3_TYPE_* defines, + * the function may also return other L3 protocol types (e.g. from IEEE + * EtherTypes list) recognized by the parser. If protocol type is not + * recognized, ODP_PROTO_L3_TYPE_NONE is returned. 
+ * * @param pkt Packet handle * * @return Layer 3 protocol type @@ -1696,6 +1701,11 @@ odp_proto_l3_type_t odp_packet_l3_type(odp_packet_t pkt); * * Returns layer 4 protocol type. Initial type value is ODP_PROTO_L4_TYPE_NONE. * + * In addition to protocol types specified in ODP_PROTO_L4_TYPE_* defines, + * the function may also return other L4 protocol types (e.g. from IANA protocol + * number list) recognized by the parser. If protocol type is not recognized, + * ODP_PROTO_L4_TYPE_NONE is returned. + * * @param pkt Packet handle * * @return Layer 4 protocol type diff --git a/include/odp/api/spec/packet_io_types.h b/include/odp/api/spec/packet_io_types.h index 153e188b79..fe86f6f12b 100644 --- a/include/odp/api/spec/packet_io_types.h +++ b/include/odp/api/spec/packet_io_types.h @@ -72,6 +72,12 @@ extern "C" { * Do not wait on packet input */ +/** + * @def ODP_PKTIN_MAX_QUEUES + * Maximum number of packet input queues supported by the API. Use + * odp_pktio_capability() to check the maximum number of queues per interface. + */ + /** * @def ODP_PKTOUT_MAX_QUEUES * Maximum number of packet output queues supported by the API. Use @@ -259,6 +265,16 @@ typedef struct odp_pktin_queue_param_t { * Queue type is defined by the input mode. The default value is 1. */ uint32_t num_queues; + /** Input queue size array + * + * An array containing queue sizes for each 'num_queues' input queues + * in ODP_PKTIN_MODE_DIRECT mode. The value of zero means + * implementation specific default size. Nonzero values must be between + * 'min_input_queue_size' and 'max_input_queue_size' capabilities. The + * implementation may round-up given values. The default value is zero. 
+ */ + uint32_t queue_size[ODP_PKTIN_MAX_QUEUES]; + /** Queue parameters * * These are used for input queue creation in ODP_PKTIN_MODE_QUEUE @@ -546,6 +562,19 @@ typedef struct odp_pktio_parser_config_t { } odp_pktio_parser_config_t; +/** Ethernet flow control modes */ +typedef enum odp_pktio_link_pause_t { + /** Flow control mode is unknown */ + ODP_PKTIO_LINK_PAUSE_UNKNOWN = -1, + /** No flow control */ + ODP_PKTIO_LINK_PAUSE_OFF = 0, + /** Pause frame flow control enabled */ + ODP_PKTIO_LINK_PAUSE_ON = 1, + /** Priority-based Flow Control (PFC) enabled */ + ODP_PKTIO_LINK_PFC_ON = 2 + +} odp_pktio_link_pause_t; + /** * Packet IO configuration options * @@ -630,6 +659,47 @@ typedef struct odp_pktio_config_t { /** Packet input reassembly configuration */ odp_reass_config_t reassembly; + /** Link flow control configuration */ + struct { + /** Reception of flow control frames + * + * Configures interface operation when an Ethernet flow control frame is received: + * * ODP_PKTIO_LINK_PAUSE_OFF: Flow control is disabled + * * ODP_PKTIO_LINK_PAUSE_ON: Enable traditional Ethernet pause frame handling. + * When a pause frame is received, all packet output + * is halted temporarily. + * * ODP_PKTIO_LINK_PFC_ON: Enable Priority-based Flow Control (PFC) + * handling. When a PFC frame is received, packet + * output of certain (VLAN) class of service levels + * are halted temporarily. + * + * The default value is ODP_PKTIO_LINK_PAUSE_OFF. + */ + odp_pktio_link_pause_t pause_rx; + + /** Transmission of flow control frames + * + * Configures Ethernet flow control frame generation on the interface: + * * ODP_PKTIO_LINK_PAUSE_OFF: Flow control is disabled + * * ODP_PKTIO_LINK_PAUSE_ON: Enable traditional Ethernet pause frame + * generation. Pause frames are generated to request + * the remote end of the link to halt all + * transmissions temporarily. + * * ODP_PKTIO_LINK_PFC_ON: Enable Priority-based Flow Control (PFC) frame + * generation. 
PFC frames are generated to request + * the remote end of the link to halt transmission + * of certain (VLAN) class of service levels + * temporarily. + * + * When PFC is enabled, classifier API is used to configure CoS nodes with back + * pressure threshold and PFC priority level parameters (odp_bp_param_t). + * + * The default value is ODP_PKTIO_LINK_PAUSE_OFF. + */ + odp_pktio_link_pause_t pause_tx; + + } flow_control; + } odp_pktio_config_t; /** @@ -819,9 +889,21 @@ typedef struct odp_pktin_vector_capability_t { * ODP_PKTOUT_MODE_DIRECT mode. */ typedef struct odp_pktio_capability_t { - /** Maximum number of input queues */ + /** Maximum number of input queues + * + * Value does not exceed ODP_PKTIN_MAX_QUEUES. */ uint32_t max_input_queues; + /** Minimum input queue size + * + * Zero if configuring queue size is not supported. */ + uint32_t min_input_queue_size; + + /** Maximum input queue size + * + * Zero if configuring queue size is not supported. */ + uint32_t max_input_queue_size; + /** Maximum number of output queues * * Value does not exceed ODP_PKTOUT_MAX_QUEUES. 
*/ @@ -917,6 +999,22 @@ typedef struct odp_pktio_capability_t { /** Statistics counters capabilities */ odp_pktio_stats_capability_t stats; + /** Supported flow control modes */ + struct { + /** Reception of traditional Ethernet pause frames */ + uint32_t pause_rx: 1; + + /** Reception of PFC frames */ + uint32_t pfc_rx: 1; + + /** Generation of traditional Ethernet pause frames */ + uint32_t pause_tx: 1; + + /** Generation of PFC frames */ + uint32_t pfc_tx: 1; + + } flow_control; + } odp_pktio_capability_t; /** @@ -964,9 +1062,13 @@ typedef struct odp_lso_profile_param_t { /** Link status */ typedef enum odp_pktio_link_status_t { + /** Link status is unknown */ ODP_PKTIO_LINK_STATUS_UNKNOWN = -1, + /** Link status is down */ ODP_PKTIO_LINK_STATUS_DOWN = 0, + /** Link status is up */ ODP_PKTIO_LINK_STATUS_UP = 1 + } odp_pktio_link_status_t; /** @@ -1034,21 +1136,19 @@ typedef enum odp_pktio_link_autoneg_t { ODP_PKTIO_LINK_AUTONEG_OFF = 0, /** Autonegotiation enabled */ ODP_PKTIO_LINK_AUTONEG_ON = 1 + } odp_pktio_link_autoneg_t; /** Duplex mode */ typedef enum odp_pktio_link_duplex_t { + /** Link duplex mode is unknown */ ODP_PKTIO_LINK_DUPLEX_UNKNOWN = -1, + /** Half duplex mode */ ODP_PKTIO_LINK_DUPLEX_HALF = 0, + /** Full duplex mode */ ODP_PKTIO_LINK_DUPLEX_FULL = 1 -} odp_pktio_link_duplex_t; -/** Ethernet pause frame (flow control) mode */ -typedef enum odp_pktio_link_pause_t { - ODP_PKTIO_LINK_PAUSE_UNKNOWN = -1, - ODP_PKTIO_LINK_PAUSE_OFF = 0, - ODP_PKTIO_LINK_PAUSE_ON = 1 -} odp_pktio_link_pause_t; +} odp_pktio_link_duplex_t; /** * Packet IO link information diff --git a/include/odp/api/spec/packet_types.h b/include/odp/api/spec/packet_types.h index 113f24d943..1eba3506f0 100644 --- a/include/odp/api/spec/packet_types.h +++ b/include/odp/api/spec/packet_types.h @@ -89,95 +89,92 @@ extern "C" { #define ODP_NUM_PACKET_COLORS 3 /** - * @typedef odp_proto_l2_type_t * Layer 2 protocol type */ +typedef uint8_t odp_proto_l2_type_t; -/** - * @def 
ODP_PROTO_L2_TYPE_NONE - * Layer 2 protocol type not defined - * - * @def ODP_PROTO_L2_TYPE_ETH - * Layer 2 protocol is Ethernet - */ +/** Layer 2 protocol type not defined */ +#define ODP_PROTO_L2_TYPE_NONE 0 + + /** Layer 2 protocol is Ethernet */ +#define ODP_PROTO_L2_TYPE_ETH 1 /** - * @typedef odp_proto_l3_type_t * Layer 3 protocol type */ +typedef uint16_t odp_proto_l3_type_t; -/** - * @def ODP_PROTO_L3_TYPE_NONE - * Layer 3 protocol type not defined - * - * @def ODP_PROTO_L3_TYPE_ARP - * Layer 3 protocol is ARP - * - * @def ODP_PROTO_L3_TYPE_RARP - * Layer 3 protocol is RARP - * - * @def ODP_PROTO_L3_TYPE_MPLS - * Layer 3 protocol is MPLS - * - * @def ODP_PROTO_L3_TYPE_IPV4 - * Layer 3 protocol type is IPv4 - * - * @def ODP_PROTO_L3_TYPE_IPV6 - * Layer 3 protocol type is IPv6 - */ +/** Layer 3 protocol type not defined */ +#define ODP_PROTO_L3_TYPE_NONE 0xFFFF + +/* Types from IEEE EtherType assignments list */ + +/** Layer 3 protocol is ARP */ +#define ODP_PROTO_L3_TYPE_ARP 0x0806 + +/** Layer 3 protocol is RARP */ +#define ODP_PROTO_L3_TYPE_RARP 0x8035 + +/** Layer 3 protocol is MPLS */ +#define ODP_PROTO_L3_TYPE_MPLS 0x8847 + +/** Layer 3 protocol type is IPv4 */ +#define ODP_PROTO_L3_TYPE_IPV4 0x0800 + +/** Layer 3 protocol type is IPv6 */ +#define ODP_PROTO_L3_TYPE_IPV6 0x86DD /** - * @typedef odp_proto_l4_type_t * Layer 4 protocol type */ +typedef uint8_t odp_proto_l4_type_t; -/** - * @def ODP_PROTO_L4_TYPE_NONE - * Layer 4 protocol type not defined - * - * @def ODP_PROTO_L4_TYPE_ICMPV4 - * Layer 4 protocol type is ICMPv4 - * - * @def ODP_PROTO_L4_TYPE_IGMP - * Layer 4 protocol type is IGMP - * - * @def ODP_PROTO_L4_TYPE_IPV4 - * Layer 4 protocol type is IPv4 - * - * @def ODP_PROTO_L4_TYPE_TCP - * Layer 4 protocol type is TCP - * - * @def ODP_PROTO_L4_TYPE_UDP - * Layer 4 protocol type is UDP - * - * @def ODP_PROTO_L4_TYPE_IPV6 - * Layer 4 protocol type is IPv6 - * - * @def ODP_PROTO_L4_TYPE_GRE - * Layer 4 protocol type is GRE - * - * @def 
ODP_PROTO_L4_TYPE_ESP - * Layer 4 protocol type is IPSEC ESP - * - * @def ODP_PROTO_L4_TYPE_AH - * Layer 4 protocol type is IPSEC AH - * - * @def ODP_PROTO_L4_TYPE_ICMPV6 - * Layer 4 protocol type is ICMPv6 - * - * @def ODP_PROTO_L4_TYPE_NO_NEXT - * Layer 4 protocol type is "No Next Header". - * Protocol / next header number is 59. - * - * @def ODP_PROTO_L4_TYPE_IPCOMP - * Layer 4 protocol type is IP Payload Compression Protocol - * - * @def ODP_PROTO_L4_TYPE_SCTP - * Layer 4 protocol type is SCTP - * - * @def ODP_PROTO_L4_TYPE_ROHC - * Layer 4 protocol type is ROHC - */ +/** Layer 4 protocol type not defined */ + #define ODP_PROTO_L4_TYPE_NONE 255 + +/* Types from IANA assigned Internet protocol numbers list */ + +/** Layer 4 protocol type is ICMPv4 */ + #define ODP_PROTO_L4_TYPE_ICMPV4 1 + +/** Layer 4 protocol type is IGMP */ +#define ODP_PROTO_L4_TYPE_IGMP 2 + +/** Layer 4 protocol type is IPv4 */ +#define ODP_PROTO_L4_TYPE_IPV4 4 + +/** Layer 4 protocol type is TCP */ + #define ODP_PROTO_L4_TYPE_TCP 6 + +/** Layer 4 protocol type is UDP */ +#define ODP_PROTO_L4_TYPE_UDP 17 + +/** Layer 4 protocol type is IPv6 */ +#define ODP_PROTO_L4_TYPE_IPV6 41 + +/** Layer 4 protocol type is GRE */ +#define ODP_PROTO_L4_TYPE_GRE 47 + +/** Layer 4 protocol type is IPSEC ESP */ +#define ODP_PROTO_L4_TYPE_ESP 50 + +/** Layer 4 protocol type is IPSEC AH */ +#define ODP_PROTO_L4_TYPE_AH 51 + +/** Layer 4 protocol type is ICMPv6 */ +#define ODP_PROTO_L4_TYPE_ICMPV6 58 + +/** Layer 4 protocol type is No Next Header for IPv6 */ +#define ODP_PROTO_L4_TYPE_NO_NEXT 59 + +/** Layer 4 protocol type is IP Payload Compression Protocol */ +#define ODP_PROTO_L4_TYPE_IPCOMP 108 + +/** Layer 4 protocol type is SCTP */ +#define ODP_PROTO_L4_TYPE_SCTP 132 + +/** Layer 4 protocol type is ROHC */ +#define ODP_PROTO_L4_TYPE_ROHC 142 /** * @typedef odp_packet_chksum_status_t diff --git a/include/odp/api/spec/shared_memory.h b/include/odp/api/spec/shared_memory.h index d1955db266..36b38782f5 100644 
--- a/include/odp/api/spec/shared_memory.h +++ b/include/odp/api/spec/shared_memory.h @@ -40,7 +40,15 @@ extern "C" { * Maximum shared memory block name length in chars including null char */ - /* Shared memory flags */ +/** + * @def ODP_SHM_IOVA_INVALID + * Invalid IOVA address + */ + +/** + * @def ODP_SHM_PA_INVALID + * Invalid physical address + */ /** * Application SW only, no HW access @@ -123,8 +131,42 @@ typedef struct odp_shm_info_t { /** ODP_SHM_* flags */ uint32_t flags; + + /** + * Number of memory segments + * + * Number of segments in physical (or IOVA) address space that map memory to + * the SHM block. More information about each segment can be retrieved with + * odp_shm_segment_info(). Number of segments is always at least one, also when + * physical (or IOVA) segmentation information is not available. */ + uint32_t num_seg; + } odp_shm_info_t; +/** + * SHM memory segment info + */ +typedef struct odp_shm_segment_info_t { + /** Segment start address (virtual) */ + uintptr_t addr; + + /** Segment start address in IO virtual address (IOVA) space + * + * Value is ODP_SHM_IOVA_INVALID when IOVA address is not available. + */ + uint64_t iova; + + /** Segment start address in physical address space + * + * Value is ODP_SHM_PA_INVALID when physical address is not available. + */ + uint64_t pa; + + /** Segment length in bytes */ + uint64_t len; + +} odp_shm_segment_info_t; + /** * Shared memory capabilities */ @@ -173,22 +215,25 @@ int odp_shm_capability(odp_shm_capability_t *capa); * Reserve a contiguous block of shared memory * * Reserve a contiguous block of shared memory that fulfills size, alignment - * and shareability (ODP_SHM_* flags) requirements. In general, a name is - * optional and does not need to be unique. However, if the block will be + * and shareability (ODP_SHM_* flags) requirements. By default (without flags), the memory + * block can be shared between all threads of the ODP instance. 
Memory address (odp_shm_addr()) + * shareability depends on application memory model and #ODP_SHM_SINGLE_VA flag usage. + * + * Name is optional and does not need to be unique. However, if the block will be * searched with odp_shm_lookup() or odp_shm_import(), a unique name is needed * for correct match. * - * @param name Name of the block or NULL. Maximum string length is - * ODP_SHM_NAME_LEN. + * @param name Name of the block or NULL. Maximum string length is ODP_SHM_NAME_LEN. * @param size Block size in bytes * @param align Block alignment in bytes * @param flags Shared memory parameter flags (ODP_SHM_*). Default value is 0. * * @return Handle of the reserved block * @retval ODP_SHM_INVALID on failure + * + * @see odp_mem_model_t */ -odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align, - uint32_t flags); +odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align, uint32_t flags); /** * Free a contiguous block of shared memory @@ -249,9 +294,7 @@ void *odp_shm_addr(odp_shm_t shm); /** * Shared memory block info * - * Get information about the specified shared memory block. This is the only - * shared memory API function which accepts invalid shm handles (any bit value) - * without causing undefined behavior. + * Get information about the specified shared memory block. * * @param shm Block handle * @param[out] info Block info pointer for output @@ -261,6 +304,27 @@ void *odp_shm_addr(odp_shm_t shm); */ int odp_shm_info(odp_shm_t shm, odp_shm_info_t *info); +/** + * SHM block segmentation information + * + * Retrieve information about each memory segment of an SHM block. SHM info call outputs the number + * of memory segments (@see odp_shm_info_t.num_seg). A single segment info call may be used + * to request information for all the segments, or for a subset of those. Use 'index' and 'num' + * parameters to specify the segments. Segment indexing starts from zero and continues to + * odp_shm_info_t.num_seg - 1. 
Segment infos are written in virtual memory address order and + * without address overlaps. Segment info array must have at least 'num' elements. + * + * @param shm SHM block handle + * @param index Index of the first segment to retrieve information + * @param num Number of segments to retrieve information + * @param[out] info Segment info array for output + * + * @retval 0 on success + * @retval <0 on failure + */ +int odp_shm_segment_info(odp_shm_t shm, uint32_t index, uint32_t num, + odp_shm_segment_info_t info[]); + /** * Print all shared memory blocks */ diff --git a/include/odp/api/spec/system_info.h b/include/odp/api/spec/system_info.h index a732639840..118d8f8952 100644 --- a/include/odp/api/spec/system_info.h +++ b/include/odp/api/spec/system_info.h @@ -24,6 +24,9 @@ extern "C" { * @{ */ +/** Maximum memory block name length in chars (including null char) */ +#define ODP_SYSTEM_MEMBLOCK_NAME_LEN 64 + /** * CPU instruction set architecture (ISA) families */ @@ -191,6 +194,70 @@ typedef struct odp_system_info_t { } odp_system_info_t; +/** + * Memory information + */ +typedef struct odp_system_meminfo_t { + /** + * Total mapped memory + * + * Total amount of memory (in bytes) in all memory pages that are reserved by + * this ODP instance from the system. + */ + uint64_t total_mapped; + + /** + * Total memory usage + * + * Total amount of memory (in bytes) that is currently in use by this ODP instance. + * This is a subset of 'total_mapped' bytes. + */ + uint64_t total_used; + + /** + * Total memory usage overheads + * + * Total amount of memory (in bytes) that is currently consumed by roundings to + * alignment/block/page size limits, etc. overheads. This is a subset of 'total_used' + * bytes. 
+ */ + uint64_t total_overhead; + +} odp_system_meminfo_t; + +/** + * Memory block information + */ +typedef struct odp_system_memblock_t { + /** Memory block name */ + char name[ODP_SYSTEM_MEMBLOCK_NAME_LEN]; + + /** Start address of the block */ + uintptr_t addr; + + /** + * Memory usage + * + * Total amount of memory (in bytes) that is used by this block. + */ + uint64_t used; + + /** + * Memory usage overheads + * + * Total amount of memory (in bytes) that is currently consumed by rounding to + * alignment/block/page size limits, etc. overheads. This is a subset of 'used' bytes. + */ + uint64_t overhead; + + /** Memory page size in bytes + * + * Page size used for this block. + */ + uint64_t page_size; + +} odp_system_memblock_t; + /** * Retrieve system information * @@ -204,6 +271,27 @@ typedef struct odp_system_info_t { */ int odp_system_info(odp_system_info_t *info); +/** + * Retrieve ODP memory usage information + * + * Retrieves information about ODP memory usage for debugging and monitoring purposes. A successful + * call fills in system memory info and outputs up to 'num' elements into memory block info array. + * Each array element represents a memory block used due to an API call (SHM reservation, pool + * creation, etc) or an implementation internal memory allocation. + * + * When return value is 'num' or less, it indicates the number of elements written. If return value + * is larger than 'num', all 'num' elements were written and the return value indicates the number + * of elements that would have been written into a large enough array. + * + * @param[out] info Pointer to memory info struct for output + * @param[out] block Pointer to memory block info array for output + * @param num Maximum number of array elements to output (0 ... 
array size) + * + * @return Number of array elements written / would have been written + * @retval <0 on failure + */ +int32_t odp_system_meminfo(odp_system_meminfo_t *info, odp_system_memblock_t block[], int32_t num); + /** * Default system huge page size in bytes * diff --git a/include/odp/arch/arm32-linux/odp/api/abi/ipsec_types.h b/include/odp/arch/arm32-linux/odp/api/abi/ipsec_types.h new file mode 100644 index 0000000000..49d854444e --- /dev/null +++ b/include/odp/arch/arm32-linux/odp/api/abi/ipsec_types.h @@ -0,0 +1,7 @@ +/* Copyright (c) 2022, Nokia + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include diff --git a/include/odp/arch/arm64-linux/odp/api/abi/ipsec_types.h b/include/odp/arch/arm64-linux/odp/api/abi/ipsec_types.h new file mode 100644 index 0000000000..49d854444e --- /dev/null +++ b/include/odp/arch/arm64-linux/odp/api/abi/ipsec_types.h @@ -0,0 +1,7 @@ +/* Copyright (c) 2022, Nokia + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include diff --git a/include/odp/arch/default-linux/odp/api/abi/ipsec_types.h b/include/odp/arch/default-linux/odp/api/abi/ipsec_types.h new file mode 100644 index 0000000000..49d854444e --- /dev/null +++ b/include/odp/arch/default-linux/odp/api/abi/ipsec_types.h @@ -0,0 +1,7 @@ +/* Copyright (c) 2022, Nokia + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include diff --git a/include/odp/arch/power64-linux/odp/api/abi/ipsec_types.h b/include/odp/arch/power64-linux/odp/api/abi/ipsec_types.h new file mode 100644 index 0000000000..49d854444e --- /dev/null +++ b/include/odp/arch/power64-linux/odp/api/abi/ipsec_types.h @@ -0,0 +1,7 @@ +/* Copyright (c) 2022, Nokia + * All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include diff --git a/include/odp/arch/x86_32-linux/odp/api/abi/ipsec_types.h b/include/odp/arch/x86_32-linux/odp/api/abi/ipsec_types.h new file mode 100644 index 0000000000..49d854444e --- /dev/null +++ b/include/odp/arch/x86_32-linux/odp/api/abi/ipsec_types.h @@ -0,0 +1,7 @@ +/* Copyright (c) 2022, Nokia + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include diff --git a/include/odp/arch/x86_64-linux/odp/api/abi/ipsec_types.h b/include/odp/arch/x86_64-linux/odp/api/abi/ipsec_types.h new file mode 100644 index 0000000000..49d854444e --- /dev/null +++ b/include/odp/arch/x86_64-linux/odp/api/abi/ipsec_types.h @@ -0,0 +1,7 @@ +/* Copyright (c) 2022, Nokia + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include diff --git a/platform/linux-dpdk/Makefile.am b/platform/linux-dpdk/Makefile.am index 9d24d2e6cf..3bd599bcd0 100644 --- a/platform/linux-dpdk/Makefile.am +++ b/platform/linux-dpdk/Makefile.am @@ -37,6 +37,7 @@ odpapiplatinclude_HEADERS = \ include/odp/api/plat/event_inline_types.h \ include/odp/api/plat/event_vector_inline_types.h \ include/odp/api/plat/hash_inlines.h \ + include/odp/api/plat/ipsec_inlines.h \ include/odp/api/plat/packet_flag_inlines.h \ include/odp/api/plat/packet_inline_types.h \ include/odp/api/plat/packet_inlines.h \ @@ -80,6 +81,7 @@ odpapiabiarchinclude_HEADERS += \ include-abi/odp/api/abi/hash.h \ include-abi/odp/api/abi/init.h \ include-abi/odp/api/abi/ipsec.h \ + include-abi/odp/api/abi/ipsec_types.h \ include-abi/odp/api/abi/packet.h \ include-abi/odp/api/abi/packet_types.h \ include-abi/odp/api/abi/packet_flags.h \ @@ -249,6 +251,7 @@ __LIB__libodp_dpdk_la_SOURCES += \ ../linux-generic/odp_cpu_api.c \ ../linux-generic/odp_event_api.c \ ../linux-generic/odp_hash_api.c \ + ../linux-generic/odp_ipsec_api.c \ ../linux-generic/odp_packet_api.c \ ../linux-generic/odp_packet_flags_api.c \ ../linux-generic/odp_packet_io_api.c 
\ diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/ipsec_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/ipsec_types.h new file mode 120000 index 0000000000..b6ca88309b --- /dev/null +++ b/platform/linux-dpdk/include-abi/odp/api/abi/ipsec_types.h @@ -0,0 +1 @@ +../../../../../linux-generic/include-abi/odp/api/abi/ipsec_types.h \ No newline at end of file diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/packet_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/packet_types.h index fadcaacb63..1f5f9e6f73 100644 --- a/platform/linux-dpdk/include-abi/odp/api/abi/packet_types.h +++ b/platform/linux-dpdk/include-abi/odp/api/abi/packet_types.h @@ -47,39 +47,6 @@ typedef ODP_HANDLE_T(odp_packet_tx_compl_t); #define ODP_PACKET_TX_COMPL_INVALID _odp_cast_scalar(odp_packet_tx_compl_t, 0) -typedef uint8_t odp_proto_l2_type_t; - -#define ODP_PROTO_L2_TYPE_NONE 0 -#define ODP_PROTO_L2_TYPE_ETH 1 - -typedef uint8_t odp_proto_l3_type_t; - -#define ODP_PROTO_L3_TYPE_NONE 0 -#define ODP_PROTO_L3_TYPE_ARP 1 -#define ODP_PROTO_L3_TYPE_RARP 2 -#define ODP_PROTO_L3_TYPE_MPLS 3 -#define ODP_PROTO_L3_TYPE_IPV4 4 -#define ODP_PROTO_L3_TYPE_IPV6 6 - -typedef uint8_t odp_proto_l4_type_t; - -/* Numbers from IANA Assigned Internet Protocol Numbers list */ -#define ODP_PROTO_L4_TYPE_NONE 0 -#define ODP_PROTO_L4_TYPE_ICMPV4 1 -#define ODP_PROTO_L4_TYPE_IGMP 2 -#define ODP_PROTO_L4_TYPE_IPV4 4 -#define ODP_PROTO_L4_TYPE_TCP 6 -#define ODP_PROTO_L4_TYPE_UDP 17 -#define ODP_PROTO_L4_TYPE_IPV6 41 -#define ODP_PROTO_L4_TYPE_GRE 47 -#define ODP_PROTO_L4_TYPE_ESP 50 -#define ODP_PROTO_L4_TYPE_AH 51 -#define ODP_PROTO_L4_TYPE_ICMPV6 58 -#define ODP_PROTO_L4_TYPE_NO_NEXT 59 -#define ODP_PROTO_L4_TYPE_IPCOMP 108 -#define ODP_PROTO_L4_TYPE_SCTP 132 -#define ODP_PROTO_L4_TYPE_ROHC 142 - typedef enum { ODP_PACKET_GREEN = 0, ODP_PACKET_YELLOW = 1, diff --git a/platform/linux-dpdk/include/odp/api/plat/buffer_inlines.h b/platform/linux-dpdk/include/odp/api/plat/buffer_inlines.h index 
3aa3993995..6265914b49 100644 --- a/platform/linux-dpdk/include/odp/api/plat/buffer_inlines.h +++ b/platform/linux-dpdk/include/odp/api/plat/buffer_inlines.h @@ -29,14 +29,13 @@ /** @cond _ODP_HIDE_FROM_DOXYGEN_ */ -extern const _odp_event_inline_offset_t _odp_event_inline_offset; -extern const _odp_buffer_inline_offset_t _odp_buffer_inline_offset; - #ifndef _ODP_NO_INLINE /* Inline functions by default */ #define _ODP_INLINE static inline #define odp_buffer_from_event __odp_buffer_from_event + #define odp_buffer_from_event_multi __odp_buffer_from_event_multi #define odp_buffer_to_event __odp_buffer_to_event + #define odp_buffer_to_event_multi __odp_buffer_to_event_multi #define odp_buffer_addr __odp_buffer_addr #define odp_buffer_size __odp_buffer_size #define odp_buffer_pool __odp_buffer_pool @@ -55,11 +54,23 @@ _ODP_INLINE odp_buffer_t odp_buffer_from_event(odp_event_t ev) return (odp_buffer_t)ev; } +_ODP_INLINE void odp_buffer_from_event_multi(odp_buffer_t buf[], const odp_event_t ev[], int num) +{ + for (int i = 0; i < num; i++) + buf[i] = odp_buffer_from_event(ev[i]); +} + _ODP_INLINE odp_event_t odp_buffer_to_event(odp_buffer_t buf) { return (odp_event_t)buf; } +_ODP_INLINE void odp_buffer_to_event_multi(const odp_buffer_t buf[], odp_event_t ev[], int num) +{ + for (int i = 0; i < num; i++) + ev[i] = odp_buffer_to_event(buf[i]); +} + _ODP_INLINE void *odp_buffer_addr(odp_buffer_t buf) { return _odp_event_hdr_field(buf, void *, base_data); diff --git a/platform/linux-dpdk/include/odp/api/plat/event_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/event_inline_types.h index caa18fc93d..ee5490ff1a 100644 --- a/platform/linux-dpdk/include/odp/api/plat/event_inline_types.h +++ b/platform/linux-dpdk/include/odp/api/plat/event_inline_types.h @@ -34,6 +34,8 @@ typedef struct _odp_event_inline_offset_t { } _odp_event_inline_offset_t; +extern const _odp_event_inline_offset_t _odp_event_inline_offset; + /** @endcond */ #ifdef __cplusplus diff --git 
a/platform/linux-dpdk/include/odp/api/plat/ipsec_inlines.h b/platform/linux-dpdk/include/odp/api/plat/ipsec_inlines.h new file mode 120000 index 0000000000..72c865d7d5 --- /dev/null +++ b/platform/linux-dpdk/include/odp/api/plat/ipsec_inlines.h @@ -0,0 +1 @@ +../../../../../linux-generic/include/odp/api/plat/ipsec_inlines.h \ No newline at end of file diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h b/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h index 7482fca93c..b6876e6d72 100644 --- a/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h +++ b/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h @@ -22,8 +22,6 @@ extern "C" { /** @cond _ODP_HIDE_FROM_DOXYGEN_ */ -extern const _odp_packet_inline_offset_t _odp_packet_inline; - static inline uint64_t _odp_packet_input_flags(odp_packet_t pkt) { return _odp_pkt_get(pkt, uint64_t, input_flags); diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h index 4bf00dac4a..9a72a70ef1 100644 --- a/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h +++ b/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h @@ -43,6 +43,7 @@ typedef struct _odp_packet_inline_offset_t { uint16_t flags; uint16_t subtype; uint16_t cls_mark; + uint16_t ipsec_ctx; uint16_t buf_addr; uint16_t data; uint16_t pkt_len; @@ -55,6 +56,8 @@ typedef struct _odp_packet_inline_offset_t { } _odp_packet_inline_offset_t; +extern const _odp_packet_inline_offset_t _odp_packet_inline; + /* Packet input & protocol flags */ typedef union { /* All input flags */ diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h b/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h index 76559ba86f..4347afb88f 100644 --- a/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h +++ b/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h @@ -130,10 +130,6 @@ extern "C" { #define 
_ODP_INLINE #endif -extern const _odp_packet_inline_offset_t _odp_packet_inline; - -extern const _odp_pool_inline_offset_t _odp_pool_inline; - _ODP_INLINE void *odp_packet_offset(odp_packet_t pkt, uint32_t offset, uint32_t *len, odp_packet_seg_t *seg) { diff --git a/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h index ec6804c72e..330cbe4cee 100644 --- a/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h +++ b/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h @@ -29,6 +29,8 @@ typedef struct _odp_timeout_inline_offset_t { } _odp_timeout_inline_offset_t; +extern const _odp_timeout_inline_offset_t _odp_timeout_inline_offset; + /** @endcond */ #ifdef __cplusplus diff --git a/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h b/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h index f8ff5c9388..357d4df068 100644 --- a/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h +++ b/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h @@ -17,8 +17,6 @@ /** @cond _ODP_HIDE_FROM_DOXYGEN_ */ -extern const _odp_timeout_inline_offset_t _odp_timeout_inline_offset; - #ifndef _ODP_NO_INLINE /* Inline functions by default */ #define _ODP_INLINE static inline diff --git a/platform/linux-dpdk/include/odp_packet_io_internal.h b/platform/linux-dpdk/include/odp_packet_io_internal.h index 27e7fa077f..78b1f343a1 100644 --- a/platform/linux-dpdk/include/odp_packet_io_internal.h +++ b/platform/linux-dpdk/include/odp_packet_io_internal.h @@ -36,7 +36,6 @@ extern "C" { #include #include -#define PKTIO_MAX_QUEUES ODP_PKTOUT_MAX_QUEUES #define PKTIO_LSO_PROFILES 16 /* Assume at least Ethernet header per each segment */ #define PKTIO_LSO_MIN_PAYLOAD_OFFSET 14 @@ -55,9 +54,9 @@ ODP_STATIC_ASSERT(PKTIO_LSO_PROFILES < UINT8_MAX, "PKTIO_LSO_PROFILES_ERROR"); struct pktio_if_ops; #if ODP_CACHE_LINE_SIZE == 128 -#define PKTIO_PRIVATE_SIZE 1408 +#define PKTIO_PRIVATE_SIZE 1536 #else 
-#define PKTIO_PRIVATE_SIZE 1216 +#define PKTIO_PRIVATE_SIZE 1344 #endif typedef struct ODP_ALIGNED_CACHE { @@ -140,21 +139,13 @@ typedef struct ODP_ALIGNED_CACHE { odp_queue_t queue; odp_pktin_queue_t pktin; odp_pktin_vector_config_t vector; - } in_queue[PKTIO_MAX_QUEUES]; + } in_queue[ODP_PKTIN_MAX_QUEUES]; struct { odp_queue_t queue; odp_pktout_queue_t pktout; - } out_queue[PKTIO_MAX_QUEUES]; + } out_queue[ODP_PKTOUT_MAX_QUEUES]; - /* inotify instance for pcapng fifos */ - struct { - enum { - PCAPNG_WR_STOP = 0, - PCAPNG_WR_PKT, - } state[PKTIO_MAX_QUEUES]; - int fd[PKTIO_MAX_QUEUES]; - } pcapng; } pktio_entry_t; typedef struct { diff --git a/platform/linux-dpdk/odp_packet.c b/platform/linux-dpdk/odp_packet.c index 83be3dfed9..16b940f2f1 100644 --- a/platform/linux-dpdk/odp_packet.c +++ b/platform/linux-dpdk/odp_packet.c @@ -65,6 +65,7 @@ const _odp_packet_inline_offset_t _odp_packet_inline ODP_ALIGNED_CACHE = { .flags = offsetof(odp_packet_hdr_t, p.flags), .subtype = offsetof(odp_packet_hdr_t, subtype), .cls_mark = offsetof(odp_packet_hdr_t, cls_mark), + .ipsec_ctx = offsetof(odp_packet_hdr_t, ipsec_ctx), .buf_addr = offsetof(odp_packet_hdr_t, event_hdr.mb.buf_addr), .data = offsetof(odp_packet_hdr_t, event_hdr.mb.data_off), .pkt_len = offsetof(odp_packet_hdr_t, event_hdr.mb.pkt_len), diff --git a/platform/linux-dpdk/odp_packet_dpdk.c b/platform/linux-dpdk/odp_packet_dpdk.c index a8e4298fbe..f62e9c2cc3 100644 --- a/platform/linux-dpdk/odp_packet_dpdk.c +++ b/platform/linux-dpdk/odp_packet_dpdk.c @@ -82,7 +82,7 @@ /** DPDK runtime configuration options */ typedef struct { int multicast_enable; - int num_rx_desc; + int num_rx_desc_default; int num_tx_desc_default; int rx_drop_en; } dpdk_opt_t; @@ -114,15 +114,17 @@ typedef struct ODP_ALIGNED_CACHE { uint32_t mtu_max; /* DPDK MTU has been modified */ uint8_t mtu_set; + /* Number of RX descriptors per queue */ + uint16_t num_rx_desc[ODP_PKTIN_MAX_QUEUES]; /* Number of TX descriptors per queue */ - uint16_t 
num_tx_desc[PKTIO_MAX_QUEUES]; + uint16_t num_tx_desc[ODP_PKTOUT_MAX_QUEUES]; /* --- Locks for MT safe operations --- */ /* RX queue locks */ - odp_ticketlock_t rx_lock[PKTIO_MAX_QUEUES] ODP_ALIGNED_CACHE; + odp_ticketlock_t rx_lock[ODP_PKTIN_MAX_QUEUES] ODP_ALIGNED_CACHE; /* TX queue locks */ - odp_ticketlock_t tx_lock[PKTIO_MAX_QUEUES] ODP_ALIGNED_CACHE; + odp_ticketlock_t tx_lock[ODP_PKTOUT_MAX_QUEUES] ODP_ALIGNED_CACHE; } pkt_dpdk_t; @@ -174,7 +176,7 @@ static int init_options(pktio_entry_t *pktio_entry, dpdk_opt_t *opt = &pkt_priv(pktio_entry)->opt; if (!lookup_opt("num_rx_desc", dev_info->driver_name, - &opt->num_rx_desc)) + &opt->num_rx_desc_default)) return -1; if (!lookup_opt("num_tx_desc", dev_info->driver_name, @@ -194,7 +196,7 @@ static int init_options(pktio_entry_t *pktio_entry, _ODP_DBG("DPDK interface (%s): %" PRIu16 "\n", dev_info->driver_name, pkt_priv(pktio_entry)->port_id); _ODP_DBG(" multicast: %d\n", opt->multicast_enable); - _ODP_DBG(" num_rx_desc: %d\n", opt->num_rx_desc); + _ODP_DBG(" num_rx_desc: %d\n", opt->num_rx_desc_default); _ODP_DBG(" num_tx_desc: %d\n", opt->num_tx_desc_default); _ODP_DBG(" rx_drop_en: %d\n", opt->rx_drop_en); @@ -395,6 +397,7 @@ static void prepare_rss_conf(pktio_entry_t *pktio_entry, static int dpdk_input_queues_config(pktio_entry_t *pktio_entry, const odp_pktin_queue_param_t *p) { + pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry); odp_pktin_mode_t mode = pktio_entry->param.in_mode; uint8_t lockless; @@ -403,13 +406,32 @@ static int dpdk_input_queues_config(pktio_entry_t *pktio_entry, /** * Scheduler synchronizes input queue polls. 
Only single thread * at a time polls a queue */ - if (mode == ODP_PKTIN_MODE_SCHED || - p->op_mode == ODP_PKTIO_OP_MT_UNSAFE) + if (mode == ODP_PKTIN_MODE_SCHED || p->op_mode == ODP_PKTIO_OP_MT_UNSAFE) lockless = 1; else lockless = 0; - pkt_priv(pktio_entry)->flags.lockless_rx = lockless; + pkt_dpdk->flags.lockless_rx = lockless; + + /* Configure RX descriptors */ + for (uint32_t i = 0; i < p->num_queues; i++) { + uint16_t num_rx_desc = pkt_dpdk->opt.num_rx_desc_default; + int ret; + + if (mode == ODP_PKTIN_MODE_DIRECT && p->queue_size[i] != 0) + num_rx_desc = p->queue_size[i]; + + /* Adjust descriptor count */ + ret = rte_eth_dev_adjust_nb_rx_tx_desc(pkt_dpdk->port_id, &num_rx_desc, NULL); + if (ret && ret != -ENOTSUP) { + _ODP_ERR("DPDK: rte_eth_dev_adjust_nb_rx_tx_desc() failed: %d\n", ret); + return -1; + } + pkt_dpdk->num_rx_desc[i] = num_rx_desc; + + _ODP_DBG("Port %" PRIu16 " RX queue %" PRIu32 " using %" PRIu16 " descriptors\n", + pkt_dpdk->port_id, i, num_rx_desc); + } return 0; } @@ -417,10 +439,8 @@ static int dpdk_input_queues_config(pktio_entry_t *pktio_entry, static int dpdk_output_queues_config(pktio_entry_t *pktio_entry, const odp_pktout_queue_param_t *p) { - struct rte_eth_dev_info dev_info; pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry); uint8_t lockless; - int ret; if (p->op_mode == ODP_PKTIO_OP_MT_UNSAFE) lockless = 1; @@ -429,15 +449,10 @@ static int dpdk_output_queues_config(pktio_entry_t *pktio_entry, pkt_dpdk->flags.lockless_tx = lockless; - ret = rte_eth_dev_info_get(pkt_dpdk->port_id, &dev_info); - if (ret) { - _ODP_ERR("DPDK: rte_eth_dev_info_get() failed: %d\n", ret); - return -1; - } - /* Configure TX descriptors */ for (uint32_t i = 0; i < p->num_queues; i++) { uint16_t num_tx_desc = pkt_dpdk->opt.num_tx_desc_default; + int ret; if (p->queue_size[i] != 0) num_tx_desc = p->queue_size[i]; @@ -450,8 +465,8 @@ static int dpdk_output_queues_config(pktio_entry_t *pktio_entry, } pkt_dpdk->num_tx_desc[i] = num_tx_desc; - _ODP_DBG("TX queue %" 
PRIu32 " using %" PRIu16 " descriptors\n", i, num_tx_desc); - } + _ODP_DBG("Port %" PRIu16 " TX queue %" PRIu32 " using %" PRIu16 " descriptors\n", + pkt_dpdk->port_id, i, num_tx_desc); } return 0; } @@ -508,8 +523,9 @@ static int dpdk_init_capability(pktio_entry_t *pktio_entry, memset(capa, 0, sizeof(odp_pktio_capability_t)); - capa->max_input_queues = RTE_MIN(dev_info->max_rx_queues, - PKTIO_MAX_QUEUES); + capa->max_input_queues = RTE_MIN(dev_info->max_rx_queues, ODP_PKTIN_MAX_QUEUES); + capa->min_input_queue_size = dev_info->rx_desc_lim.nb_min; + capa->max_input_queue_size = dev_info->rx_desc_lim.nb_max; /* ixgbe devices support only 16 RX queues in RSS mode */ if (!strncmp(dev_info->driver_name, IXGBE_DRV_NAME, @@ -517,8 +533,7 @@ static int dpdk_init_capability(pktio_entry_t *pktio_entry, capa->max_input_queues = RTE_MIN(16, (int)capa->max_input_queues); - capa->max_output_queues = RTE_MIN(dev_info->max_tx_queues, - PKTIO_MAX_QUEUES); + capa->max_output_queues = RTE_MIN(dev_info->max_tx_queues, ODP_PKTOUT_MAX_QUEUES); capa->min_output_queue_size = dev_info->tx_desc_lim.nb_min; capa->max_output_queue_size = dev_info->tx_desc_lim.nb_max; @@ -700,10 +715,10 @@ static int setup_pkt_dpdk(odp_pktio_t pktio ODP_UNUSED, else rte_eth_allmulticast_disable(pkt_dpdk->port_id); - for (i = 0; i < PKTIO_MAX_QUEUES; i++) { + for (i = 0; i < ODP_PKTIN_MAX_QUEUES; i++) odp_ticketlock_init(&pkt_dpdk->rx_lock[i]); + for (i = 0; i < ODP_PKTOUT_MAX_QUEUES; i++) odp_ticketlock_init(&pkt_dpdk->tx_lock[i]); - } return 0; } @@ -762,24 +777,14 @@ static int dpdk_setup_eth_rx(const pktio_entry_t *pktio_entry, uint32_t i; int ret; uint16_t port_id = pkt_dpdk->port_id; - uint16_t num_rx_desc = pkt_dpdk->opt.num_rx_desc; pool_t *pool = _odp_pool_entry(pktio_entry->pool); rxconf = dev_info->default_rxconf; rxconf.rx_drop_en = pkt_dpdk->opt.rx_drop_en; - /* Adjust descriptor count */ - ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &num_rx_desc, NULL); - if (ret && ret != -ENOTSUP) { - 
_ODP_ERR("DPDK: rte_eth_dev_adjust_nb_rx_tx_desc() failed: %d\n", ret); - return -1; - } - - _ODP_DBG("RX queues using %" PRIu16 " descriptors\n", num_rx_desc); - for (i = 0; i < pktio_entry->num_in_queue; i++) { - ret = rte_eth_rx_queue_setup(port_id, i, num_rx_desc, + ret = rte_eth_rx_queue_setup(port_id, i, pkt_dpdk->num_rx_desc[i], rte_eth_dev_socket_id(port_id), &rxconf, pool->rte_mempool); if (ret < 0) { diff --git a/platform/linux-dpdk/odp_shared_memory.c b/platform/linux-dpdk/odp_shared_memory.c index dba91eede9..470b78eb5c 100644 --- a/platform/linux-dpdk/odp_shared_memory.c +++ b/platform/linux-dpdk/odp_shared_memory.c @@ -68,13 +68,15 @@ typedef struct { * Memory block descriptor */ typedef struct { + /* DPDK memzone. If != NULL, the shm block is interpreted as reserved. */ + const struct rte_memzone *mz; + /* User requested SHM size */ + uint64_t size; /* Memory block type */ shm_type_t type; /* Memory block name */ char name[ODP_SHM_NAME_LEN]; - /* DPDK memzone. If this pointer != NULL, the shm block is interpreted - * as reserved. 
*/ - const struct rte_memzone *mz; + } shm_block_t; /** @@ -121,19 +123,20 @@ static void name_to_mz_name(const char *name, char *mz_name) } /** - * Convert DPDK memzone length into ODP shm block size + * Return a pointer to shm zone descriptor stored at the end of DPDK memzone */ -static uint64_t shm_size(const struct rte_memzone *mz) +static shm_zone_t *shm_zone(const struct rte_memzone *mz) { - return mz->len - sizeof(shm_zone_t); + return (shm_zone_t *)(uintptr_t)((uint8_t *)mz->addr + mz->len - sizeof(shm_zone_t)); } -/** - * Return a pointer to shm zone descriptor stored at the end of DPDK memzone - */ -static shm_zone_t *shm_zone(const struct rte_memzone *mz) +static shm_block_t *mz_to_shm_block(const struct rte_memzone *mz) { - return (shm_zone_t *)(uintptr_t)((uint8_t *)mz->addr + shm_size(mz)); + for (int i = 0; i < SHM_MAX_NB_BLOCKS; i++) { + if (shm_tbl->block[i].mz == mz) + return &shm_tbl->block[i]; + } + return NULL; } static int find_free_block(void) @@ -294,6 +297,8 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align, snprintf(block->name, ODP_SHM_NAME_LEN, "%s", name); block->name[ODP_SHM_NAME_LEN - 1] = 0; block->type = SHM_TYPE_LOCAL; + block->size = size; + /* Note: ODP_SHM_SW_ONLY/ODP_SHM_PROC/ODP_SHM_SINGLE_VA flags are * currently ignored. 
*/ shm_zone(mz)->flags = flags; @@ -427,15 +432,114 @@ int odp_shm_info(odp_shm_t shm, odp_shm_info_t *info) info->name = block->name; info->addr = block->mz->addr; - info->size = shm_size(block->mz); + info->size = block->size; info->page_size = block->mz->hugepage_sz; info->flags = shm_zone(block->mz)->flags; + info->num_seg = 1; odp_spinlock_unlock(&shm_tbl->lock); return 0; } +int odp_shm_segment_info(odp_shm_t shm, uint32_t index, uint32_t num, + odp_shm_segment_info_t seg_info[]) +{ + shm_block_t *block; + int idx = handle_to_idx(shm); + phys_addr_t pa; + + if (index != 0 || num != 1) { + _ODP_ERR("Only single segment supported (%u, %u)\n", index, num); + return -1; + } + + odp_spinlock_lock(&shm_tbl->lock); + + if (!handle_is_valid(shm)) { + odp_spinlock_unlock(&shm_tbl->lock); + return -1; + } + + block = &shm_tbl->block[idx]; + pa = rte_mem_virt2phy(block->mz->addr); + + seg_info[0].addr = (uintptr_t)block->mz->addr; + seg_info[0].iova = block->mz->iova != RTE_BAD_IOVA ? block->mz->iova : ODP_SHM_IOVA_INVALID; + seg_info[0].pa = pa != RTE_BAD_IOVA ? 
pa : ODP_SHM_PA_INVALID; + seg_info[0].len = block->size; + + odp_spinlock_unlock(&shm_tbl->lock); + + return 0; +} + +typedef struct { + odp_system_meminfo_t *info; + odp_system_memblock_t *memblock; + int32_t blocks; + int32_t max_num; + +} memzone_walker_data_t; + +static void walk_memzone(const struct rte_memzone *mz, void *arg) +{ + memzone_walker_data_t *data = arg; + shm_block_t *block = mz_to_shm_block(mz); + odp_system_memblock_t *memblock; + int32_t cur = data->blocks; + const char *name; + int name_len; + + data->info->total_mapped += mz->len; + data->blocks++; + + if (block != NULL) { + name = block->name; + data->info->total_used += block->size; + data->info->total_overhead += mz->len - block->size; + } else { /* DPDK internal reservations */ + name = mz->name; + data->info->total_used += mz->len; + } + + if (cur >= data->max_num) + return; + memblock = &data->memblock[cur]; + + name_len = strlen(name); + if (name_len >= ODP_SYSTEM_MEMBLOCK_NAME_LEN) + name_len = ODP_SYSTEM_MEMBLOCK_NAME_LEN - 1; + + memcpy(memblock->name, name, name_len); + memblock->name[name_len] = 0; + + memblock->addr = (uintptr_t)mz->addr; + memblock->used = mz->len; + memblock->overhead = block != NULL ? 
mz->len - block->size : 0; + memblock->page_size = mz->hugepage_sz; +} + +int32_t odp_system_meminfo(odp_system_meminfo_t *info, odp_system_memblock_t memblock[], + int32_t max_num) +{ + memzone_walker_data_t walker_data; + + memset(info, 0, sizeof(odp_system_meminfo_t)); + memset(&walker_data, 0, sizeof(memzone_walker_data_t)); + walker_data.max_num = max_num; + walker_data.info = info; + walker_data.memblock = memblock; + + odp_spinlock_lock(&shm_tbl->lock); + + rte_memzone_walk(walk_memzone, (void *)&walker_data); + + odp_spinlock_unlock(&shm_tbl->lock); + + return walker_data.blocks; +} + void odp_shm_print_all(void) { shm_block_t *block; @@ -451,7 +555,7 @@ void odp_shm_print_all(void) continue; _ODP_PRINT(" %s: addr: %p, len: %" PRIu64 " page size: %" PRIu64 "\n", block->name, block->mz->addr, - shm_size(block->mz), block->mz->hugepage_sz); + block->size, block->mz->hugepage_sz); } odp_spinlock_unlock(&shm_tbl->lock); @@ -476,9 +580,9 @@ void odp_shm_print(odp_shm_t shm) _ODP_PRINT(" type: %s\n", block->type == SHM_TYPE_LOCAL ? 
"local" : "remote"); _ODP_PRINT(" flags: 0x%x\n", shm_zone(block->mz)->flags); _ODP_PRINT(" start: %p\n", block->mz->addr); - _ODP_PRINT(" len: %" PRIu64 "\n", shm_size(block->mz)); + _ODP_PRINT(" len: %" PRIu64 "\n", block->size); _ODP_PRINT(" page size: %" PRIu64 "\n", block->mz->hugepage_sz); - _ODP_PRINT(" NUMA ID: %" PRIi32 "\n", block->mz->socket_id); + _ODP_PRINT(" NUMA ID: %" PRIi32 "\n", block->mz->socket_id); _ODP_PRINT("\n"); odp_spinlock_unlock(&shm_tbl->lock); diff --git a/platform/linux-dpdk/test/process-mode.conf b/platform/linux-dpdk/test/process-mode.conf new file mode 100644 index 0000000000..b95f50ea2f --- /dev/null +++ b/platform/linux-dpdk/test/process-mode.conf @@ -0,0 +1,7 @@ +# Mandatory fields +odp_implementation = "linux-dpdk" +config_file_version = "0.1.18" + +dpdk: { + process_mode_memory_mb = 1024 +} diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am index ce8b0ba099..e762148aae 100644 --- a/platform/linux-generic/Makefile.am +++ b/platform/linux-generic/Makefile.am @@ -39,6 +39,7 @@ odpapiplatinclude_HEADERS = \ include/odp/api/plat/event_inline_types.h \ include/odp/api/plat/event_vector_inline_types.h \ include/odp/api/plat/hash_inlines.h \ + include/odp/api/plat/ipsec_inlines.h \ include/odp/api/plat/packet_flag_inlines.h \ include/odp/api/plat/packet_inline_types.h \ include/odp/api/plat/packet_inlines.h \ @@ -82,6 +83,7 @@ odpapiabiarchinclude_HEADERS += \ include-abi/odp/api/abi/hash.h \ include-abi/odp/api/abi/init.h \ include-abi/odp/api/abi/ipsec.h \ + include-abi/odp/api/abi/ipsec_types.h \ include-abi/odp/api/abi/packet.h \ include-abi/odp/api/abi/packet_types.h \ include-abi/odp/api/abi/packet_flags.h \ @@ -288,6 +290,7 @@ __LIB__libodp_linux_la_SOURCES += \ odp_cpu_api.c \ odp_event_api.c \ odp_hash_api.c \ + odp_ipsec_api.c \ odp_packet_api.c \ odp_packet_flags_api.c \ odp_packet_io_api.c \ diff --git a/platform/linux-generic/include-abi/odp/api/abi/ipsec.h 
b/platform/linux-generic/include-abi/odp/api/abi/ipsec.h index a04bb17414..1817e5564c 100644 --- a/platform/linux-generic/include-abi/odp/api/abi/ipsec.h +++ b/platform/linux-generic/include-abi/odp/api/abi/ipsec.h @@ -1,4 +1,5 @@ /* Copyright (c) 2016-2018, Linaro Limited + * Copyright (c) 2022, Nokia * All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause @@ -7,7 +8,7 @@ /** * @file * - * ODP IPSEC API - platform specific types + * ODP IPsec */ #ifndef ODP_API_ABI_IPSEC_H_ @@ -17,16 +18,8 @@ extern "C" { #endif -#include -#include - -/** @ingroup odp_ipsec - * @{ - */ - -typedef ODP_HANDLE_T(odp_ipsec_sa_t); - -#define ODP_IPSEC_SA_INVALID _odp_cast_scalar(odp_ipsec_sa_t, 0) +/* Inlined API functions */ +#include /** * @} diff --git a/platform/linux-generic/include-abi/odp/api/abi/ipsec_types.h b/platform/linux-generic/include-abi/odp/api/abi/ipsec_types.h new file mode 100644 index 0000000000..376666ded9 --- /dev/null +++ b/platform/linux-generic/include-abi/odp/api/abi/ipsec_types.h @@ -0,0 +1,41 @@ +/* Copyright (c) 2016-2018, Linaro Limited + * Copyright (c) 2022, Nokia + * All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/** + * @file + * + * ODP IPsec platform specific types + */ + +#ifndef ODP_API_ABI_IPSEC_TYPES_H_ +#define ODP_API_ABI_IPSEC_TYPES_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#include + +/** @ingroup odp_ipsec + * @{ + */ + +typedef ODP_HANDLE_T(odp_ipsec_sa_t); + +#define ODP_IPSEC_SA_INVALID _odp_cast_scalar(odp_ipsec_sa_t, 0) + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/platform/linux-generic/include-abi/odp/api/abi/packet_io_types.h b/platform/linux-generic/include-abi/odp/api/abi/packet_io_types.h index 3106f26a72..1692e71b16 100644 --- a/platform/linux-generic/include-abi/odp/api/abi/packet_io_types.h +++ b/platform/linux-generic/include-abi/odp/api/abi/packet_io_types.h @@ -47,6 +47,8 @@ typedef struct odp_pktout_queue_t { #define ODP_PKTIN_NO_WAIT 0 +#define ODP_PKTIN_MAX_QUEUES 64 + #define ODP_PKTOUT_MAX_QUEUES 64 #define ODP_PKTIO_STATS_EXTRA_NAME_LEN 64 diff --git a/platform/linux-generic/include-abi/odp/api/abi/packet_types.h b/platform/linux-generic/include-abi/odp/api/abi/packet_types.h index be2cb9df67..4da9332bab 100644 --- a/platform/linux-generic/include-abi/odp/api/abi/packet_types.h +++ b/platform/linux-generic/include-abi/odp/api/abi/packet_types.h @@ -49,39 +49,6 @@ typedef ODP_HANDLE_T(odp_packet_tx_compl_t); #define ODP_PACKET_OFFSET_INVALID 0xffff -typedef uint8_t odp_proto_l2_type_t; - -#define ODP_PROTO_L2_TYPE_NONE 0 -#define ODP_PROTO_L2_TYPE_ETH 1 - -typedef uint8_t odp_proto_l3_type_t; - -#define ODP_PROTO_L3_TYPE_NONE 0 -#define ODP_PROTO_L3_TYPE_ARP 1 -#define ODP_PROTO_L3_TYPE_RARP 2 -#define ODP_PROTO_L3_TYPE_MPLS 3 -#define ODP_PROTO_L3_TYPE_IPV4 4 -#define ODP_PROTO_L3_TYPE_IPV6 6 - -typedef uint8_t odp_proto_l4_type_t; - -/* Numbers from IANA Assigned Internet Protocol Numbers list */ -#define ODP_PROTO_L4_TYPE_NONE 0 -#define ODP_PROTO_L4_TYPE_ICMPV4 1 -#define ODP_PROTO_L4_TYPE_IGMP 2 -#define ODP_PROTO_L4_TYPE_IPV4 4 
-#define ODP_PROTO_L4_TYPE_TCP 6 -#define ODP_PROTO_L4_TYPE_UDP 17 -#define ODP_PROTO_L4_TYPE_IPV6 41 -#define ODP_PROTO_L4_TYPE_GRE 47 -#define ODP_PROTO_L4_TYPE_ESP 50 -#define ODP_PROTO_L4_TYPE_AH 51 -#define ODP_PROTO_L4_TYPE_ICMPV6 58 -#define ODP_PROTO_L4_TYPE_NO_NEXT 59 -#define ODP_PROTO_L4_TYPE_IPCOMP 108 -#define ODP_PROTO_L4_TYPE_SCTP 132 -#define ODP_PROTO_L4_TYPE_ROHC 142 - typedef enum { ODP_PACKET_GREEN = 0, ODP_PACKET_YELLOW = 1, diff --git a/platform/linux-generic/include-abi/odp/api/abi/shared_memory.h b/platform/linux-generic/include-abi/odp/api/abi/shared_memory.h index 648bd1ee30..551d49e30c 100644 --- a/platform/linux-generic/include-abi/odp/api/abi/shared_memory.h +++ b/platform/linux-generic/include-abi/odp/api/abi/shared_memory.h @@ -31,6 +31,9 @@ typedef ODP_HANDLE_T(odp_shm_t); #define ODP_SHM_NAME_LEN 32 +#define ODP_SHM_IOVA_INVALID ((uint64_t)-1) +#define ODP_SHM_PA_INVALID ODP_SHM_IOVA_INVALID + /** * @} */ diff --git a/platform/linux-generic/include/odp/api/plat/buffer_inline_types.h b/platform/linux-generic/include/odp/api/plat/buffer_inline_types.h index 9689ddd063..f64a176f52 100644 --- a/platform/linux-generic/include/odp/api/plat/buffer_inline_types.h +++ b/platform/linux-generic/include/odp/api/plat/buffer_inline_types.h @@ -26,6 +26,8 @@ typedef struct _odp_buffer_inline_offset_t { } _odp_buffer_inline_offset_t; +extern const _odp_buffer_inline_offset_t _odp_buffer_inline_offset; + /** @endcond */ #ifdef __cplusplus diff --git a/platform/linux-generic/include/odp/api/plat/buffer_inlines.h b/platform/linux-generic/include/odp/api/plat/buffer_inlines.h index d876f60f4f..34d4b5675c 100644 --- a/platform/linux-generic/include/odp/api/plat/buffer_inlines.h +++ b/platform/linux-generic/include/odp/api/plat/buffer_inlines.h @@ -19,15 +19,13 @@ /** @cond _ODP_HIDE_FROM_DOXYGEN_ */ -extern const _odp_event_inline_offset_t _odp_event_inline_offset; -extern const _odp_buffer_inline_offset_t _odp_buffer_inline_offset; -extern const 
_odp_pool_inline_offset_t _odp_pool_inline; - #ifndef _ODP_NO_INLINE /* Inline functions by default */ #define _ODP_INLINE static inline #define odp_buffer_from_event __odp_buffer_from_event + #define odp_buffer_from_event_multi __odp_buffer_from_event_multi #define odp_buffer_to_event __odp_buffer_to_event + #define odp_buffer_to_event_multi __odp_buffer_to_event_multi #define odp_buffer_addr __odp_buffer_addr #define odp_buffer_size __odp_buffer_size #define odp_buffer_pool __odp_buffer_pool @@ -43,11 +41,23 @@ _ODP_INLINE odp_buffer_t odp_buffer_from_event(odp_event_t ev) return (odp_buffer_t)ev; } +_ODP_INLINE void odp_buffer_from_event_multi(odp_buffer_t buf[], const odp_event_t ev[], int num) +{ + for (int i = 0; i < num; i++) + buf[i] = odp_buffer_from_event(ev[i]); +} + _ODP_INLINE odp_event_t odp_buffer_to_event(odp_buffer_t buf) { return (odp_event_t)buf; } +_ODP_INLINE void odp_buffer_to_event_multi(const odp_buffer_t buf[], odp_event_t ev[], int num) +{ + for (int i = 0; i < num; i++) + ev[i] = odp_buffer_to_event(buf[i]); +} + _ODP_INLINE void *odp_buffer_addr(odp_buffer_t buf) { return _odp_event_hdr_field((odp_event_t)buf, void *, base_data); diff --git a/platform/linux-generic/include/odp/api/plat/event_inline_types.h b/platform/linux-generic/include/odp/api/plat/event_inline_types.h index 77a0deecb6..caa075871c 100644 --- a/platform/linux-generic/include/odp/api/plat/event_inline_types.h +++ b/platform/linux-generic/include/odp/api/plat/event_inline_types.h @@ -33,6 +33,8 @@ typedef struct _odp_event_inline_offset_t { } _odp_event_inline_offset_t; +extern const _odp_event_inline_offset_t _odp_event_inline_offset; + /** @endcond */ #ifdef __cplusplus diff --git a/platform/linux-generic/include/odp/api/plat/event_inlines.h b/platform/linux-generic/include/odp/api/plat/event_inlines.h index dd8e9249c8..37c015b217 100644 --- a/platform/linux-generic/include/odp/api/plat/event_inlines.h +++ b/platform/linux-generic/include/odp/api/plat/event_inlines.h 
@@ -16,9 +16,6 @@ /** @cond _ODP_HIDE_FROM_DOXYGEN_ */ -extern const _odp_event_inline_offset_t _odp_event_inline_offset; -extern const _odp_packet_inline_offset_t _odp_packet_inline; - #ifndef _ODP_NO_INLINE /* Inline functions by default */ #define _ODP_INLINE static inline diff --git a/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h b/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h index 723e1a3d1d..773f5171c8 100644 --- a/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h +++ b/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h @@ -40,6 +40,8 @@ typedef struct _odp_event_vector_inline_offset_t { } _odp_event_vector_inline_offset_t; +extern const _odp_event_vector_inline_offset_t _odp_event_vector_inline; + /** @endcond */ #ifdef __cplusplus diff --git a/platform/linux-generic/include/odp/api/plat/ipsec_inlines.h b/platform/linux-generic/include/odp/api/plat/ipsec_inlines.h new file mode 100644 index 0000000000..1d1f6ec61b --- /dev/null +++ b/platform/linux-generic/include/odp/api/plat/ipsec_inlines.h @@ -0,0 +1,58 @@ +/* Copyright (c) 2022, Nokia + * All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef ODP_PLAT_IPSEC_INLINES_H_ +#define ODP_PLAT_IPSEC_INLINES_H_ + +#include +#include +#include + +#include +#include + +/** @cond _ODP_HIDE_FROM_DOXYGEN_ */ + +#ifndef _ODP_NO_INLINE + /* Inline functions by default */ + #define _ODP_INLINE static inline + #define odp_ipsec_packet_from_event __odp_ipsec_packet_from_event + #define odp_ipsec_packet_to_event __odp_ipsec_packet_to_event + #define odp_ipsec_result __odp_ipsec_result +#else + #define _ODP_INLINE +#endif + +_ODP_INLINE odp_packet_t odp_ipsec_packet_from_event(odp_event_t ev) +{ + _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET); + _ODP_ASSERT(odp_event_subtype(ev) == ODP_EVENT_PACKET_IPSEC); + + return odp_packet_from_event(ev); +} + +_ODP_INLINE odp_event_t odp_ipsec_packet_to_event(odp_packet_t pkt) +{ + return odp_packet_to_event(pkt); +} + +_ODP_INLINE int odp_ipsec_result(odp_ipsec_packet_result_t *result, odp_packet_t pkt) +{ + odp_ipsec_packet_result_t *res; + + _ODP_ASSERT(result != NULL); + _ODP_ASSERT(odp_packet_subtype(pkt) == ODP_EVENT_PACKET_IPSEC); + + res = _odp_pkt_get_ptr(pkt, odp_ipsec_packet_result_t, ipsec_ctx); + + *result = *res; + + return 0; +} + +/** @endcond */ + +#endif diff --git a/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h index 04ad9e9684..9330d89f8b 100644 --- a/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h +++ b/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h @@ -20,8 +20,6 @@ /** @cond _ODP_HIDE_FROM_DOXYGEN_ */ -extern const _odp_packet_inline_offset_t _odp_packet_inline; - static inline uint64_t _odp_packet_input_flags(odp_packet_t pkt) { return _odp_pkt_get(pkt, uint64_t, input_flags); diff --git a/platform/linux-generic/include/odp/api/plat/packet_inline_types.h b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h index 2ae0829c32..ae03457f92 100644 --- 
a/platform/linux-generic/include/odp/api/plat/packet_inline_types.h +++ b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h @@ -52,9 +52,12 @@ typedef struct _odp_packet_inline_offset_t { uint16_t flags; uint16_t subtype; uint16_t cls_mark; + uint16_t ipsec_ctx; } _odp_packet_inline_offset_t; +extern const _odp_packet_inline_offset_t _odp_packet_inline; + /* Packet input & protocol flags */ typedef union { /* All input flags */ diff --git a/platform/linux-generic/include/odp/api/plat/packet_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_inlines.h index 21d184ac5a..01d47d8375 100644 --- a/platform/linux-generic/include/odp/api/plat/packet_inlines.h +++ b/platform/linux-generic/include/odp/api/plat/packet_inlines.h @@ -111,9 +111,6 @@ int _odp_packet_copy_from_mem_seg(odp_packet_t pkt, uint32_t offset, int _odp_packet_copy_to_mem_seg(odp_packet_t pkt, uint32_t offset, uint32_t len, void *dst); -extern const _odp_packet_inline_offset_t _odp_packet_inline; -extern const _odp_pool_inline_offset_t _odp_pool_inline; - _ODP_INLINE void *odp_packet_data(odp_packet_t pkt) { return _odp_pkt_get(pkt, void *, seg_data); diff --git a/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h index 08fb07a627..2f8e0a709f 100644 --- a/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h +++ b/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h @@ -42,8 +42,6 @@ #define _ODP_INLINE #endif -extern const _odp_event_vector_inline_offset_t _odp_event_vector_inline; - _ODP_INLINE odp_packet_vector_t odp_packet_vector_from_event(odp_event_t ev) { _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET_VECTOR); diff --git a/platform/linux-generic/include/odp/api/plat/pool_inline_types.h b/platform/linux-generic/include/odp/api/plat/pool_inline_types.h index 9deec89a15..02f59f9828 100644 --- 
a/platform/linux-generic/include/odp/api/plat/pool_inline_types.h +++ b/platform/linux-generic/include/odp/api/plat/pool_inline_types.h @@ -17,25 +17,28 @@ extern "C" { #endif -/** @internal Pool field accessor */ +#include + +/** @cond _ODP_HIDE_FROM_DOXYGEN_ */ + +/** Pool field accessor */ #define _odp_pool_get(pool, cast, field) \ (*(cast *)(uintptr_t)((uint8_t *)pool + _odp_pool_inline.field)) -/** @internal Pool header field offsets for inline functions */ +/** Pool header field offsets for inline functions */ typedef struct _odp_pool_inline_offset_t { - /** @internal field offset */ uint16_t index; - /** @internal field offset */ uint16_t seg_len; - /** @internal field offset */ uint16_t uarea_size; - /** @internal field offset */ uint16_t ext_head_offset; - /** @internal field offset */ uint16_t ext_pkt_buf_size; } _odp_pool_inline_offset_t; +extern const _odp_pool_inline_offset_t _odp_pool_inline; + +/** @endcond */ + #ifdef __cplusplus } #endif diff --git a/platform/linux-generic/include/odp/api/plat/pool_inlines.h b/platform/linux-generic/include/odp/api/plat/pool_inlines.h index a304d2881f..58d66fad20 100644 --- a/platform/linux-generic/include/odp/api/plat/pool_inlines.h +++ b/platform/linux-generic/include/odp/api/plat/pool_inlines.h @@ -19,8 +19,6 @@ extern "C" { /** @cond _ODP_HIDE_FROM_DOXYGEN_ */ -extern const _odp_pool_inline_offset_t _odp_pool_inline; - #ifndef _ODP_NO_INLINE /* Inline functions by default */ #define _ODP_INLINE static inline diff --git a/platform/linux-generic/include/odp/api/plat/queue_inline_types.h b/platform/linux-generic/include/odp/api/plat/queue_inline_types.h index 4eade3dea5..e957785c4e 100644 --- a/platform/linux-generic/include/odp/api/plat/queue_inline_types.h +++ b/platform/linux-generic/include/odp/api/plat/queue_inline_types.h @@ -56,6 +56,8 @@ typedef struct { } _odp_queue_api_fn_t; +extern _odp_queue_inline_offset_t _odp_queue_inline_offset; + /** @endcond */ #ifdef __cplusplus diff --git 
a/platform/linux-generic/include/odp/api/plat/queue_inlines.h b/platform/linux-generic/include/odp/api/plat/queue_inlines.h index c557b4ba35..22673a887e 100644 --- a/platform/linux-generic/include/odp/api/plat/queue_inlines.h +++ b/platform/linux-generic/include/odp/api/plat/queue_inlines.h @@ -11,7 +11,6 @@ /** @cond _ODP_HIDE_FROM_DOXYGEN_ */ -extern _odp_queue_inline_offset_t _odp_queue_inline_offset; extern const _odp_queue_api_fn_t *_odp_queue_api; #ifndef _ODP_NO_INLINE diff --git a/platform/linux-generic/include/odp/api/plat/timer_inline_types.h b/platform/linux-generic/include/odp/api/plat/timer_inline_types.h index ec6804c72e..330cbe4cee 100644 --- a/platform/linux-generic/include/odp/api/plat/timer_inline_types.h +++ b/platform/linux-generic/include/odp/api/plat/timer_inline_types.h @@ -29,6 +29,8 @@ typedef struct _odp_timeout_inline_offset_t { } _odp_timeout_inline_offset_t; +extern const _odp_timeout_inline_offset_t _odp_timeout_inline_offset; + /** @endcond */ #ifdef __cplusplus diff --git a/platform/linux-generic/include/odp/api/plat/timer_inlines.h b/platform/linux-generic/include/odp/api/plat/timer_inlines.h index 4f5620cafd..648459c788 100644 --- a/platform/linux-generic/include/odp/api/plat/timer_inlines.h +++ b/platform/linux-generic/include/odp/api/plat/timer_inlines.h @@ -17,8 +17,6 @@ /** @cond _ODP_HIDE_FROM_DOXYGEN_ */ -extern const _odp_timeout_inline_offset_t _odp_timeout_inline_offset; - #ifndef _ODP_NO_INLINE /* Inline functions by default */ #define _ODP_INLINE static inline diff --git a/platform/linux-generic/include/odp_classification_datamodel.h b/platform/linux-generic/include/odp_classification_datamodel.h index 00df57bef3..daa783b075 100644 --- a/platform/linux-generic/include/odp_classification_datamodel.h +++ b/platform/linux-generic/include/odp_classification_datamodel.h @@ -145,7 +145,9 @@ typedef struct ODP_ALIGNED_CACHE cos_s { bool queue_group; odp_cls_hash_proto_t hash_proto; odp_pktin_vector_config_t vector; /* Packet 
vector config */ +#if ODP_DEPRECATED_API odp_cls_drop_t drop_policy; /* Associated Drop Policy */ +#endif size_t headroom; /* Headroom for this CoS */ odp_spinlock_t lock; /* cos lock */ odp_queue_param_t queue_param; diff --git a/platform/linux-generic/include/odp_ipsec_internal.h b/platform/linux-generic/include/odp_ipsec_internal.h index 8e0cb925e4..5717966918 100644 --- a/platform/linux-generic/include/odp_ipsec_internal.h +++ b/platform/linux-generic/include/odp_ipsec_internal.h @@ -83,10 +83,13 @@ int _odp_ipsec_status_send(odp_queue_t queue, int result, odp_ipsec_warn_t warn); -#define IPSEC_MAX_IV_LEN 32 /**< Maximum IV length in bytes */ +#define IPSEC_MAX_IV_LEN 16 /**< Maximum cipher IV length in bytes */ #define IPSEC_MAX_SALT_LEN 4 /**< Maximum salt length in bytes */ +#define CBC_SALT_LEN 8 +#define CBC_IV_LEN (CBC_SALT_LEN + sizeof(uint64_t)) + #define IPSEC_SEQ_HI_LEN 4 /**< ESN Higher bits length in bytes */ /* The minimum supported AR window size */ @@ -167,7 +170,10 @@ struct ipsec_sa_s { uint32_t esp_iv_len; uint32_t esp_pad_mask; - uint8_t salt[IPSEC_MAX_SALT_LEN]; + union { + uint8_t salt[IPSEC_MAX_SALT_LEN]; + uint8_t cbc_salt[CBC_SALT_LEN]; + }; uint32_t salt_length; odp_ipsec_lookup_mode_t lookup_mode; @@ -186,6 +192,7 @@ struct ipsec_sa_s { /* Only for outbound */ unsigned use_counter_iv : 1; + unsigned use_cbc_iv : 1; unsigned tun_ipv4 : 1; /* Only for inbound */ @@ -272,6 +279,14 @@ struct ipsec_sa_s { } out; }; } sa_info; + + /* + * Flag to check if the SA soft expiry status event was already + * sent. This field is applicable only for the soft expiry status + * event that gets generated for IPsec SAs configured in inline + * outbound mode. 
+ */ + odp_atomic_u32_t soft_expiry_notified; }; /** diff --git a/platform/linux-generic/include/odp_packet_io_internal.h b/platform/linux-generic/include/odp_packet_io_internal.h index 844088ac12..954602959f 100644 --- a/platform/linux-generic/include/odp_packet_io_internal.h +++ b/platform/linux-generic/include/odp_packet_io_internal.h @@ -39,7 +39,6 @@ extern "C" { #include #include -#define PKTIO_MAX_QUEUES ODP_PKTOUT_MAX_QUEUES #define PKTIO_LSO_PROFILES 16 /* Assume at least Ethernet header per each segment */ #define PKTIO_LSO_MIN_PAYLOAD_OFFSET 14 @@ -70,9 +69,9 @@ struct pktio_if_ops; #elif defined(_ODP_PKTIO_XDP) #define PKTIO_PRIVATE_SIZE 29696 #elif defined(_ODP_PKTIO_DPDK) && ODP_CACHE_LINE_SIZE == 128 -#define PKTIO_PRIVATE_SIZE 4032 +#define PKTIO_PRIVATE_SIZE 4160 #elif defined(_ODP_PKTIO_DPDK) -#define PKTIO_PRIVATE_SIZE 3840 +#define PKTIO_PRIVATE_SIZE 3968 #else #define PKTIO_PRIVATE_SIZE 384 #endif @@ -158,21 +157,13 @@ typedef struct ODP_ALIGNED_CACHE { odp_queue_t queue; odp_pktin_queue_t pktin; odp_pktin_vector_config_t vector; - } in_queue[PKTIO_MAX_QUEUES]; + } in_queue[ODP_PKTIN_MAX_QUEUES]; struct { odp_queue_t queue; odp_pktout_queue_t pktout; - } out_queue[PKTIO_MAX_QUEUES]; + } out_queue[ODP_PKTOUT_MAX_QUEUES]; - /**< inotify instance for pcapng fifos */ - struct { - enum { - PCAPNG_WR_STOP = 0, - PCAPNG_WR_PKT, - } state[PKTIO_MAX_QUEUES]; - int fd[PKTIO_MAX_QUEUES]; - } pcapng; } pktio_entry_t; typedef struct { diff --git a/platform/linux-generic/include/odp_pcapng.h b/platform/linux-generic/include/odp_pcapng.h index b88427238d..6f2a3dda56 100644 --- a/platform/linux-generic/include/odp_pcapng.h +++ b/platform/linux-generic/include/odp_pcapng.h @@ -19,8 +19,8 @@ extern "C" { int _odp_pcapng_start(pktio_entry_t *entry); void _odp_pcapng_stop(pktio_entry_t *entry); -int _odp_pcapng_write_pkts(pktio_entry_t *entry, int qidx, - const odp_packet_t packets[], int num); +int _odp_pcapng_dump_pkts(pktio_entry_t *entry, int qidx, + const 
odp_packet_t packets[], int num); #ifdef __cplusplus } diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c index f1291ddcde..96ac4c640c 100644 --- a/platform/linux-generic/odp_classification.c +++ b/platform/linux-generic/odp_classification.c @@ -137,7 +137,9 @@ void odp_cls_cos_param_init(odp_cls_cos_param_t *param) param->queue = ODP_QUEUE_INVALID; param->pool = ODP_POOL_INVALID; +#if ODP_DEPRECATED_API param->drop_policy = ODP_COS_DROP_NEVER; +#endif param->num_queue = 1; param->vector.enable = false; odp_queue_param_init(¶m->queue_param); @@ -234,9 +236,11 @@ static inline void _cls_queue_unwind(uint32_t tbl_index, uint32_t j) odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param_in) { +#if ODP_DEPRECATED_API + odp_cls_drop_t drop_policy; +#endif uint32_t i, j; odp_queue_t queue; - odp_cls_drop_t drop_policy; cos_t *cos; uint32_t tbl_index; odp_cls_cos_param_t param = *param_in; @@ -275,7 +279,9 @@ odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param_ } } +#if ODP_DEPRECATED_API drop_policy = param.drop_policy; +#endif for (i = 0; i < CLS_COS_MAX_ENTRY; i++) { cos = &cos_tbl->cos_entry[i]; @@ -336,7 +342,9 @@ odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param_ cos->pool = param.pool; cos->headroom = 0; cos->valid = 1; +#if ODP_DEPRECATED_API cos->drop_policy = drop_policy; +#endif odp_atomic_init_u32(&cos->num_rule, 0); cos->index = i; cos->vector = param.vector; @@ -493,6 +501,8 @@ uint32_t odp_cls_cos_queues(odp_cos_t cos_id, odp_queue_t queue[], return cos->num_queue; } +#if ODP_DEPRECATED_API + int odp_cos_drop_set(odp_cos_t cos_id, odp_cls_drop_t drop_policy) { cos_t *cos = get_cos_entry(cos_id); @@ -519,6 +529,8 @@ odp_cls_drop_t odp_cos_drop(odp_cos_t cos_id) return cos->drop_policy; } +#endif + int odp_pktio_default_cos_set(odp_pktio_t pktio_in, odp_cos_t default_cos) { pktio_entry_t *entry; @@ -581,10 +593,8 @@ int 
odp_pktio_headroom_set(odp_pktio_t pktio_in, uint32_t headroom) return 0; } -int odp_cos_with_l2_priority(odp_pktio_t pktio_in, - uint8_t num_qos, - uint8_t qos_table[], - odp_cos_t cos_table[]) +int ODP_DEPRECATE(odp_cos_with_l2_priority)(odp_pktio_t pktio_in, uint8_t num_qos, + uint8_t qos_table[], odp_cos_t cos_table[]) { pmr_l2_cos_t *l2_cos; uint32_t i; diff --git a/platform/linux-generic/odp_ipsec.c b/platform/linux-generic/odp_ipsec.c index 6be1a01ad7..28d46c7946 100644 --- a/platform/linux-generic/odp_ipsec.c +++ b/platform/linux-generic/odp_ipsec.c @@ -9,8 +9,9 @@ #include #include -#include #include +#include +#include #include #include @@ -550,7 +551,13 @@ static int ipsec_in_iv(odp_packet_t pkt, ipsec_sa_t *ipsec_sa, uint16_t iv_offset) { - memcpy(state->iv, ipsec_sa->salt, ipsec_sa->salt_length); + if (ipsec_sa->salt_length > 0) { + /* It is faster to just copy MAX_SALT_LEN bytes than the exact length */ + ODP_STATIC_ASSERT(IPSEC_MAX_SALT_LEN <= IPSEC_MAX_IV_LEN, + "IPSEC_MAX_SALT_LEN too large"); + memcpy(state->iv, ipsec_sa->salt, IPSEC_MAX_SALT_LEN); + } + _ODP_ASSERT(ipsec_sa->salt_length + ipsec_sa->esp_iv_len <= IPSEC_MAX_IV_LEN); if (odp_packet_copy_to_mem(pkt, iv_offset, ipsec_sa->esp_iv_len, @@ -558,6 +565,7 @@ static int ipsec_in_iv(odp_packet_t pkt, return -1; if (ipsec_sa->aes_ctr_iv) { + ODP_STATIC_ASSERT(IPSEC_MAX_IV_LEN >= 16, "IPSEC_MAX_IV_LEN too small"); state->iv[12] = 0; state->iv[13] = 0; state->iv[14] = 0; @@ -1396,6 +1404,9 @@ static int ipsec_random_data(uint8_t *data, uint32_t len) return 0; } +/* + * Generate cipher IV for outbound processing. 
+ */ static int ipsec_out_iv(ipsec_state_t *state, ipsec_sa_t *ipsec_sa, uint64_t seq_no) @@ -1404,21 +1415,42 @@ static int ipsec_out_iv(ipsec_state_t *state, /* Both GCM and CTR use 8-bit counters */ _ODP_ASSERT(sizeof(seq_no) == ipsec_sa->esp_iv_len); - /* Check for overrun */ - if (seq_no == 0) - return -1; + /* It is faster to just copy MAX_SALT_LEN bytes than the exact length */ + ODP_STATIC_ASSERT(IPSEC_MAX_SALT_LEN <= IPSEC_MAX_IV_LEN, + "IPSEC_MAX_SALT_LEN too large"); + memcpy(state->iv, ipsec_sa->salt, IPSEC_MAX_SALT_LEN); - memcpy(state->iv, ipsec_sa->salt, ipsec_sa->salt_length); - memcpy(state->iv + ipsec_sa->salt_length, &seq_no, - ipsec_sa->esp_iv_len); + _ODP_ASSERT(ipsec_sa->salt_length + sizeof(seq_no) <= IPSEC_MAX_IV_LEN); + memcpy(state->iv + ipsec_sa->salt_length, &seq_no, sizeof(seq_no)); if (ipsec_sa->aes_ctr_iv) { + ODP_STATIC_ASSERT(IPSEC_MAX_IV_LEN >= 16, "IPSEC_MAX_IV_LEN too small"); state->iv[12] = 0; state->iv[13] = 0; state->iv[14] = 0; state->iv[15] = 1; } - } else if (ipsec_sa->esp_iv_len) { + } else if (ipsec_sa->use_cbc_iv) { + /* + * For CBC mode ciphers with 16 byte IV we generate the cipher + * IV by concatenating a per-session random salt value and + * 64-bit sequence number. The ESP IV will be generated at + * ciphering time by CBC-encrypting a zero block using the + * cipher IV. + * + * This way each packet of an SA will have an unpredictable + * IV and different SAs (e.g. manually keyed SAs across + * restarts) will have different IV sequences (so one cannot + * predict IVs of an SA by observing the IVs of another SA + * with the same key). 
+ */ + _ODP_ASSERT(CBC_SALT_LEN + sizeof(seq_no) == ipsec_sa->esp_iv_len); + ODP_STATIC_ASSERT(CBC_SALT_LEN + sizeof(seq_no) <= IPSEC_MAX_IV_LEN, + "IPSEC_MAX_IV_LEN too small for CBC IV construction"); + memcpy(state->iv, ipsec_sa->cbc_salt, CBC_SALT_LEN); + memcpy(state->iv + CBC_SALT_LEN, &seq_no, sizeof(seq_no)); + } else if (odp_unlikely(ipsec_sa->esp_iv_len)) { + _ODP_ASSERT(ipsec_sa->esp_iv_len <= IPSEC_MAX_IV_LEN); if (ipsec_random_data(state->iv, ipsec_sa->esp_iv_len)) return -1; } @@ -1559,10 +1591,13 @@ static int ipsec_out_esp(odp_packet_t *pkt, odp_packet_copy_from_mem(*pkt, ipsec_offset, _ODP_ESPHDR_LEN, &esp); - odp_packet_copy_from_mem(*pkt, - ipsec_offset + _ODP_ESPHDR_LEN, - ipsec_sa->esp_iv_len, - state->iv + ipsec_sa->salt_length); + if (!ipsec_sa->use_cbc_iv) { + /* copy the relevant part of cipher IV to ESP IV */ + odp_packet_copy_from_mem(*pkt, + ipsec_offset + _ODP_ESPHDR_LEN, + ipsec_sa->esp_iv_len, + state->iv + ipsec_sa->salt_length); + } /* 0xa5 is a good value to fill data instead of generating random data * to create TFC padding */ _odp_packet_set_data(*pkt, esptrl_offset - esptrl.pad_len - tfc_len, @@ -1613,6 +1648,21 @@ static int ipsec_out_esp(odp_packet_t *pkt, ipsec_sa->icv_len; state->stats_length = param->cipher_range.length; + + if (ipsec_sa->use_cbc_iv) { + /* + * Encrypt zeroed ESP IV field using the special cipher IV + * to create the final unpredictable ESP IV + */ + _ODP_ASSERT(ipsec_sa->esp_iv_len == CBC_IV_LEN); + param->cipher_range.offset -= CBC_IV_LEN; + param->cipher_range.length += CBC_IV_LEN; + _odp_packet_set_data(*pkt, + ipsec_offset + _ODP_ESPHDR_LEN, + 0, + CBC_IV_LEN); + } + param->session = ipsec_sa->session; return 0; @@ -2477,6 +2527,25 @@ static void ipsec_out_inline_finalize(odp_packet_t pkt_in[], ipsec_inline_op_t *op = &ops[i]; odp_packet_t *pkt = &pkt_in[i]; + if (op->op.status.warn.soft_exp_packets || op->op.status.warn.soft_exp_bytes) { + if (!odp_atomic_load_u32(&op->op.sa->soft_expiry_notified)) { 
+ int rc; + + /* + * Another thread may have sent the notification by now but we do + * not care since sending duplicate expiry notifications is allowed. + */ + rc = _odp_ipsec_status_send(op->op.sa->queue, + ODP_IPSEC_STATUS_WARN, + op->op.sa->ipsec_sa_hdl, + 0, op->op.status.warn); + if (rc == 0) + odp_atomic_store_u32(&op->op.sa->soft_expiry_notified, 1); + else + _ODP_DBG("IPsec status event submission failed\n"); + } + } + if (odp_unlikely(op->op.status.error.all)) goto handle_err; @@ -2535,34 +2604,6 @@ int odp_ipsec_test_sa_update(odp_ipsec_sa_t sa, return 0; } -int odp_ipsec_result(odp_ipsec_packet_result_t *result, odp_packet_t packet) -{ - odp_ipsec_packet_result_t *res; - - _ODP_ASSERT(result != NULL); - - res = ipsec_pkt_result(packet); - - /* FIXME: maybe postprocess here, setting alg error in case of crypto - * error instead of processing packet fully in ipsec_in/out_single */ - - *result = *res; - - return 0; -} - -odp_packet_t odp_ipsec_packet_from_event(odp_event_t ev) -{ - _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET); - _ODP_ASSERT(odp_event_subtype(ev) == ODP_EVENT_PACKET_IPSEC); - return odp_packet_from_event(ev); -} - -odp_event_t odp_ipsec_packet_to_event(odp_packet_t pkt) -{ - return odp_packet_to_event(pkt); -} - int odp_ipsec_stats(odp_ipsec_sa_t sa, odp_ipsec_stats_t *stats) { ipsec_sa_t *ipsec_sa; diff --git a/platform/linux-generic/odp_ipsec_api.c b/platform/linux-generic/odp_ipsec_api.c new file mode 100644 index 0000000000..1d1abe84aa --- /dev/null +++ b/platform/linux-generic/odp_ipsec_api.c @@ -0,0 +1,11 @@ +/* Copyright (c) 2022, Nokia + * All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +/* Non-inlined versions of API functions */ +#define _ODP_NO_INLINE +#include diff --git a/platform/linux-generic/odp_ipsec_sad.c b/platform/linux-generic/odp_ipsec_sad.c index 3097bef4b4..1b3a90e6a8 100644 --- a/platform/linux-generic/odp_ipsec_sad.c +++ b/platform/linux-generic/odp_ipsec_sad.c @@ -485,6 +485,25 @@ static void store_sa_info(ipsec_sa_t *ipsec_sa, const odp_ipsec_sa_param_t *p) ipsec_sa->sa_info.out.mtu = p->outbound.mtu; } +static int init_cbc_salt(ipsec_sa_t *ipsec_sa) +{ + int filled = 0; + int rc; + + if (!ipsec_sa->use_cbc_iv) + return 0; + + while (filled < CBC_SALT_LEN) { + rc = odp_random_data(&ipsec_sa->cbc_salt[filled], + CBC_SALT_LEN - filled, + ODP_RANDOM_CRYPTO); + if (rc < 0) + return -1; + filled += rc; + } + return 0; +} + odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param) { ipsec_sa_t *ipsec_sa; @@ -562,6 +581,7 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param) odp_atomic_init_u64(&ipsec_sa->stats.hard_exp_pkts_err, 0); odp_atomic_init_u64(&ipsec_sa->stats.post_lifetime_err_pkts, 0); odp_atomic_init_u64(&ipsec_sa->stats.post_lifetime_err_bytes, 0); + odp_atomic_init_u32(&ipsec_sa->soft_expiry_notified, 0); if (ODP_IPSEC_MODE_TUNNEL == ipsec_sa->mode && ODP_IPSEC_DIR_OUTBOUND == param->dir) { @@ -655,6 +675,7 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param) ipsec_sa->esp_pad_mask = esp_block_len_to_mask(8); break; case ODP_CIPHER_ALG_AES_CBC: + ipsec_sa->use_cbc_iv = 1; ipsec_sa->esp_iv_len = 16; ipsec_sa->esp_pad_mask = esp_block_len_to_mask(16); break; @@ -745,6 +766,9 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param) memcpy(ipsec_sa->salt, salt_param->data, ipsec_sa->salt_length); } + if (init_cbc_salt(ipsec_sa)) + goto error; + if (odp_crypto_session_create(&crypto_param, &ipsec_sa->session, &ses_create_rc)) goto error; diff --git a/platform/linux-generic/odp_ishm.c 
b/platform/linux-generic/odp_ishm.c index 2c4f364e32..8bb558be2a 100644 --- a/platform/linux-generic/odp_ishm.c +++ b/platform/linux-generic/odp_ishm.c @@ -2174,3 +2174,71 @@ void _odp_ishm_print(int block_index) odp_spinlock_unlock(&ishm_tbl->lock); } + +int32_t odp_system_meminfo(odp_system_meminfo_t *info, odp_system_memblock_t memblock[], + int32_t max_num) +{ + ishm_block_t *block; + int name_len, proc_index; + int32_t i; + uintptr_t addr; + uint64_t len, lost, page_size; + uint64_t lost_total = 0; + uint64_t len_total = 0; + int32_t num = 0; + const uint64_t huge_sz = odp_sys_huge_page_size(); + const uint64_t normal_sz = odp_sys_page_size(); + + odp_spinlock_lock(&ishm_tbl->lock); + procsync(); + + for (i = 0; i < ISHM_MAX_NB_BLOCKS; i++) { + block = &ishm_tbl->block[i]; + + len = block->len; + if (len == 0) + continue; + + lost = len - block->user_len; + + if (num < max_num) { + odp_system_memblock_t *mb = &memblock[num]; + + name_len = strlen(block->name); + if (name_len >= ODP_SYSTEM_MEMBLOCK_NAME_LEN) + name_len = ODP_SYSTEM_MEMBLOCK_NAME_LEN - 1; + + memcpy(mb->name, block->name, name_len); + mb->name[name_len] = 0; + + addr = 0; + proc_index = procfind_block(i); + if (proc_index >= 0) + addr = (uintptr_t)ishm_proctable->entry[proc_index].start; + + page_size = 0; + if (block->huge == HUGE) + page_size = huge_sz; + else if (block->huge == NORMAL) + page_size = normal_sz; + + mb->addr = addr; + mb->used = len; + mb->overhead = lost; + mb->page_size = page_size; + } + + len_total += len; + lost_total += lost; + + num++; + } + + odp_spinlock_unlock(&ishm_tbl->lock); + + info->total_mapped = len_total; + info->total_used = len_total; + info->total_overhead = lost_total; + + return num; +} diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c index 2ebf602e82..212e48de05 100644 --- a/platform/linux-generic/odp_packet.c +++ b/platform/linux-generic/odp_packet.c @@ -67,8 +67,8 @@ const _odp_packet_inline_offset_t 
_odp_packet_inline ODP_ALIGNED_CACHE = { .input_flags = offsetof(odp_packet_hdr_t, p.input_flags), .flags = offsetof(odp_packet_hdr_t, p.flags), .subtype = offsetof(odp_packet_hdr_t, subtype), - .cls_mark = offsetof(odp_packet_hdr_t, cls_mark) - + .cls_mark = offsetof(odp_packet_hdr_t, cls_mark), + .ipsec_ctx = offsetof(odp_packet_hdr_t, ipsec_ctx), }; #include diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c index 7944521274..22b6bc9168 100644 --- a/platform/linux-generic/odp_packet_io.c +++ b/platform/linux-generic/odp_packet_io.c @@ -259,7 +259,7 @@ static void init_out_queues(pktio_entry_t *entry) { int i; - for (i = 0; i < PKTIO_MAX_QUEUES; i++) { + for (i = 0; i < ODP_PKTOUT_MAX_QUEUES; i++) { entry->out_queue[i].queue = ODP_QUEUE_INVALID; entry->out_queue[i].pktout = PKTOUT_INVALID; } @@ -279,7 +279,7 @@ static void init_pktio_entry(pktio_entry_t *entry) odp_atomic_init_u64(&entry->stats_extra.out_discards, 0); odp_atomic_init_u64(&entry->tx_ts, 0); - for (i = 0; i < PKTIO_MAX_QUEUES; i++) { + for (i = 0; i < ODP_PKTIN_MAX_QUEUES; i++) { entry->in_queue[i].queue = ODP_QUEUE_INVALID; entry->in_queue[i].pktin = PKTIN_INVALID; } @@ -602,6 +602,12 @@ int odp_pktio_config(odp_pktio_t hdl, const odp_pktio_config_t *config) return -1; } + if (config->flow_control.pause_rx != ODP_PKTIO_LINK_PAUSE_OFF || + config->flow_control.pause_tx != ODP_PKTIO_LINK_PAUSE_OFF) { + _ODP_ERR("Link flow control is not supported\n"); + return -1; + } + lock_entry(entry); if (entry->state == PKTIO_STATE_STARTED) { unlock_entry(entry); @@ -1509,6 +1515,8 @@ void odp_pktio_config_init(odp_pktio_config_t *config) config->parser.layer = ODP_PROTO_LAYER_ALL; config->reassembly.max_num_frags = 2; + config->flow_control.pause_rx = ODP_PKTIO_LINK_PAUSE_OFF; + config->flow_control.pause_tx = ODP_PKTIO_LINK_PAUSE_OFF; } int odp_pktio_info(odp_pktio_t hdl, odp_pktio_info_t *info) @@ -1813,6 +1821,10 @@ int odp_pktio_capability(odp_pktio_t pktio, 
odp_pktio_capability_t *capa) capa->reassembly.ip = false; capa->reassembly.ipv4 = false; capa->reassembly.ipv6 = false; + capa->flow_control.pause_rx = 0; + capa->flow_control.pfc_rx = 0; + capa->flow_control.pause_tx = 0; + capa->flow_control.pfc_tx = 0; return ret; } @@ -2172,12 +2184,12 @@ int odp_pktin_queue_config(odp_pktio_t pktio, entry = get_pktio_entry(pktio); if (entry == NULL) { - _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio); + _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio); return -1; } if (entry->state == PKTIO_STATE_STARTED) { - _ODP_DBG("pktio %s: not stopped\n", entry->name); + _ODP_ERR("pktio %s: not stopped\n", entry->name); return -1; } @@ -2188,7 +2200,7 @@ int odp_pktin_queue_config(odp_pktio_t pktio, return 0; if (!param->classifier_enable && param->num_queues == 0) { - _ODP_DBG("invalid num_queues for operation mode\n"); + _ODP_ERR("invalid num_queues for operation mode\n"); return -1; } @@ -2196,17 +2208,39 @@ int odp_pktin_queue_config(odp_pktio_t pktio, rc = odp_pktio_capability(pktio, &capa); if (rc) { - _ODP_DBG("pktio %s: unable to read capabilities\n", entry->name); + _ODP_ERR("pktio %s: unable to read capabilities\n", entry->name); return -1; } entry->enabled.cls = !!param->classifier_enable; if (num_queues > capa.max_input_queues) { - _ODP_DBG("pktio %s: too many input queues\n", entry->name); + _ODP_ERR("pktio %s: too many input queues\n", entry->name); return -1; } + /* Check input queue sizes in direct mode */ + for (i = 0; i < num_queues && mode == ODP_PKTIN_MODE_DIRECT; i++) { + uint32_t queue_size = param->queue_size[i]; + + if (queue_size == 0) + continue; + + if (capa.max_input_queue_size == 0) { + _ODP_ERR("pktio %s: configuring input queue size not supported\n", + entry->name); + return -1; + } + if (queue_size < capa.min_input_queue_size) { + _ODP_ERR("pktio %s: input queue size too small\n", entry->name); + return -1; + } + if (queue_size > capa.max_input_queue_size) { 
+ _ODP_ERR("pktio %s: input queue size too large\n", entry->name); + return -1; + } + } + /* Validate packet vector parameters */ if (param->vector.enable) { odp_pool_t pool = param->vector.pool; @@ -2282,7 +2316,7 @@ int odp_pktin_queue_config(odp_pktio_t pktio, queue = odp_queue_create(name, &queue_param); if (queue == ODP_QUEUE_INVALID) { - _ODP_DBG("pktio %s: event queue create failed\n", entry->name); + _ODP_ERR("pktio %s: event queue create failed\n", entry->name); destroy_in_queues(entry, i + 1); return -1; } @@ -2660,13 +2694,6 @@ int odp_pktout_queue(odp_pktio_t pktio, odp_pktout_queue_t queues[], int num) return num_queues; } -static inline void _odp_dump_pcapng_pkts(pktio_entry_t *entry, int qidx, - const odp_packet_t packets[], int num) -{ - if (odp_unlikely(entry->pcapng.state[qidx] == PCAPNG_WR_PKT)) - _odp_pcapng_write_pkts(entry, qidx, packets, num); -} - int odp_pktin_recv(odp_pktin_queue_t queue, odp_packet_t packets[], int num) { pktio_entry_t *entry; @@ -2684,7 +2711,7 @@ int odp_pktin_recv(odp_pktin_queue_t queue, odp_packet_t packets[], int num) ret = entry->ops->recv(entry, queue.index, packets, num); if (_ODP_PCAPNG) - _odp_dump_pcapng_pkts(entry, queue.index, packets, ret); + _odp_pcapng_dump_pkts(entry, queue.index, packets, ret); return ret; } @@ -2715,7 +2742,7 @@ int odp_pktin_recv_tmo(odp_pktin_queue_t queue, odp_packet_t packets[], int num, ret = entry->ops->recv_tmo(entry, queue.index, packets, num, wait); if (_ODP_PCAPNG) - _odp_dump_pcapng_pkts(entry, queue.index, packets, ret); + _odp_pcapng_dump_pkts(entry, queue.index, packets, ret); return ret; } @@ -2723,7 +2750,7 @@ int odp_pktin_recv_tmo(odp_pktin_queue_t queue, odp_packet_t packets[], int num, while (1) { ret = entry->ops->recv(entry, queue.index, packets, num); if (_ODP_PCAPNG) - _odp_dump_pcapng_pkts(entry, queue.index, packets, ret); + _odp_pcapng_dump_pkts(entry, queue.index, packets, ret); if (ret != 0 || wait == 0) return ret; @@ -2791,8 +2818,7 @@ int 
odp_pktin_recv_mq_tmo(const odp_pktin_queue_t queues[], uint32_t num_q, uint entry = get_pktio_entry(queues[lfrom].pktio); if (entry) - _odp_dump_pcapng_pkts(entry, lfrom, packets, - ret); + _odp_pcapng_dump_pkts(entry, lfrom, packets, ret); } return ret; @@ -2910,7 +2936,7 @@ int odp_pktout_send(odp_pktout_queue_t queue, const odp_packet_t packets[], return 0; if (_ODP_PCAPNG) - _odp_dump_pcapng_pkts(entry, queue.index, packets, num); + _odp_pcapng_dump_pkts(entry, queue.index, packets, num); if (odp_unlikely(_odp_pktio_tx_compl_enabled(entry))) { for (int i = 0; i < num; i++) diff --git a/platform/linux-generic/odp_pcapng.c b/platform/linux-generic/odp_pcapng.c index d97e1ca9dc..4423b04831 100644 --- a/platform/linux-generic/odp_pcapng.c +++ b/platform/linux-generic/odp_pcapng.c @@ -1,5 +1,5 @@ /* Copyright (c) 2018, Linaro Limited - * Copyright (c) 2019, Nokia + * Copyright (c) 2019-2022, Nokia * All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause @@ -11,11 +11,14 @@ #if defined(_ODP_PCAPNG) && _ODP_PCAPNG == 1 +#include #include #include #include +#include +#include #include #include #include @@ -43,6 +46,9 @@ #define INOTIFY_BUF_LEN (16 * (sizeof(struct inotify_event))) #define PCAPNG_WATCH_DIR "/var/run/odp/" +#define PKTIO_MAX_QUEUES (ODP_PKTIN_MAX_QUEUES > ODP_PKTOUT_MAX_QUEUES ? 
\ + ODP_PKTIN_MAX_QUEUES : ODP_PKTOUT_MAX_QUEUES) + /* pcapng: enhanced packet block file encoding */ typedef struct ODP_PACKED pcapng_section_hdr_block_s { uint32_t block_type; @@ -73,19 +79,36 @@ typedef struct pcapng_enhanced_packet_block_s { uint32_t packet_len; } pcapng_enhanced_packet_block_t; +/** Pktio entry specific data */ +typedef struct { + pktio_entry_t *pktio_entry; + + /* inotify instances for pcapng fifos */ + enum { + PCAPNG_WR_STOP = 0, + PCAPNG_WR_PKT, + } state[PKTIO_MAX_QUEUES]; + int fd[PKTIO_MAX_QUEUES]; +} pcapng_entry_t; + typedef struct ODP_ALIGNED_CACHE { odp_shm_t shm; - pktio_entry_t *entry[ODP_CONFIG_PKTIO_ENTRIES]; int num_entries; pthread_t inotify_thread; int inotify_fd; int inotify_watch_fd; int inotify_is_running; odp_spinlock_t lock; + pcapng_entry_t entry[ODP_CONFIG_PKTIO_ENTRIES]; } pcapng_global_t; static pcapng_global_t *pcapng_gbl; +static inline pcapng_entry_t *pcapng_entry(pktio_entry_t *pktio_entry) +{ + return &pcapng_gbl->entry[odp_pktio_index(pktio_entry->handle)]; +} + int write_pcapng_hdr(pktio_entry_t *entry, int qidx); int _odp_pcapng_init_global(void) @@ -132,6 +155,7 @@ static void pcapng_drain_fifo(int fd) static void inotify_event_handle(pktio_entry_t *entry, int qidx, struct inotify_event *event) { + pcapng_entry_t *pcapng = pcapng_entry(entry); int mtu = _ODP_MAX(odp_pktin_maxlen(entry->handle), odp_pktout_maxlen(entry->handle)); if (event->mask & IN_OPEN) { @@ -140,23 +164,23 @@ static void inotify_event_handle(pktio_entry_t *entry, int qidx, if (PIPE_BUF < mtu + sizeof(pcapng_enhanced_packet_block_t) + sizeof(uint32_t)) { _ODP_ERR("PIPE_BUF:%d too small. 
Disabling pcap\n", PIPE_BUF); - entry->pcapng.state[qidx] = PCAPNG_WR_STOP; + pcapng->state[qidx] = PCAPNG_WR_STOP; return; } ret = write_pcapng_hdr(entry, qidx); if (ret) { - entry->pcapng.state[qidx] = PCAPNG_WR_STOP; + pcapng->state[qidx] = PCAPNG_WR_STOP; } else { - entry->pcapng.state[qidx] = PCAPNG_WR_PKT; + pcapng->state[qidx] = PCAPNG_WR_PKT; _ODP_DBG("Open %s for pcap tracing\n", event->name); } } else if (event->mask & IN_CLOSE) { - int fd = entry->pcapng.fd[qidx]; + int fd = pcapng->fd[qidx]; pcapng_drain_fifo(fd); - entry->pcapng.state[qidx] = PCAPNG_WR_STOP; + pcapng->state[qidx] = PCAPNG_WR_STOP; _ODP_DBG("Close %s for pcap tracing\n", event->name); } else { _ODP_ERR("Unknown inotify event 0x%08x\n", event->mask); @@ -207,7 +231,7 @@ static pktio_entry_t *pktio_from_event(struct inotify_event *event) odp_spinlock_lock(&pcapng_gbl->lock); for (i = 0; i < ODP_CONFIG_PKTIO_ENTRIES; i++) { - pktio_entry_t *entry = pcapng_gbl->entry[i]; + pktio_entry_t *entry = pcapng_gbl->entry[i].pktio_entry; if (entry == NULL) continue; @@ -287,6 +311,7 @@ static int get_fifo_max_size(void) int _odp_pcapng_start(pktio_entry_t *entry) { + pcapng_entry_t *pcapng = pcapng_entry(entry); int ret = -1, fd; pthread_attr_t attr; unsigned int i; @@ -301,8 +326,8 @@ int _odp_pcapng_start(pktio_entry_t *entry) char pcapng_name[128]; char pcapng_path[256]; - entry->pcapng.fd[i] = -1; - entry->pcapng.state[i] = PCAPNG_WR_STOP; + pcapng->fd[i] = -1; + pcapng->state[i] = PCAPNG_WR_STOP; get_pcapng_fifo_name(pcapng_name, sizeof(pcapng_name), entry->name, i); @@ -320,7 +345,7 @@ int _odp_pcapng_start(pktio_entry_t *entry) fd = open(pcapng_path, O_RDWR | O_NONBLOCK); if (fd == -1) { _ODP_ERR("Fail to open fifo\n"); - entry->pcapng.state[i] = PCAPNG_WR_STOP; + pcapng->state[i] = PCAPNG_WR_STOP; if (remove(pcapng_path) == -1) _ODP_ERR("Can't remove fifo %s\n", pcapng_path); continue; @@ -333,14 +358,14 @@ int _odp_pcapng_start(pktio_entry_t *entry) _ODP_DBG("set pcap fifo size %i\n", 
fifo_sz); } - entry->pcapng.fd[i] = fd; + pcapng->fd[i] = fd; } odp_spinlock_lock(&pcapng_gbl->lock); /* already running from a previous pktio */ if (pcapng_gbl->inotify_is_running == 1) { - pcapng_gbl->entry[odp_pktio_index(entry->handle)] = entry; + pcapng->pktio_entry = entry; pcapng_gbl->num_entries++; odp_spinlock_unlock(&pcapng_gbl->lock); return 0; @@ -371,7 +396,7 @@ int _odp_pcapng_start(pktio_entry_t *entry) if (ret) { _ODP_ERR("Can't start inotify thread (ret=%d). pcapng disabled.\n", ret); } else { - pcapng_gbl->entry[odp_pktio_index(entry->handle)] = entry; + pcapng->pktio_entry = entry; pcapng_gbl->num_entries++; pcapng_gbl->inotify_is_running = 1; } @@ -390,13 +415,14 @@ int _odp_pcapng_start(pktio_entry_t *entry) void _odp_pcapng_stop(pktio_entry_t *entry) { + pcapng_entry_t *pcapng = pcapng_entry(entry); int ret; unsigned int i; unsigned int max_queue = _ODP_MAX(entry->num_in_queue, entry->num_out_queue); odp_spinlock_lock(&pcapng_gbl->lock); - pcapng_gbl->entry[odp_pktio_index(entry->handle)] = NULL; + pcapng->pktio_entry = NULL; pcapng_gbl->num_entries--; if (pcapng_gbl->inotify_is_running == 1 && @@ -427,8 +453,8 @@ void _odp_pcapng_stop(pktio_entry_t *entry) char pcapng_name[128]; char pcapng_path[256]; - entry->pcapng.state[i] = PCAPNG_WR_STOP; - close(entry->pcapng.fd[i]); + pcapng->state[i] = PCAPNG_WR_STOP; + close(pcapng->fd[i]); get_pcapng_fifo_name(pcapng_name, sizeof(pcapng_name), entry->name, i); @@ -442,10 +468,11 @@ void _odp_pcapng_stop(pktio_entry_t *entry) int write_pcapng_hdr(pktio_entry_t *entry, int qidx) { + pcapng_entry_t *pcapng = pcapng_entry(entry); size_t len; pcapng_section_hdr_block_t shb; pcapng_interface_description_block_t idb; - int fd = entry->pcapng.fd[qidx]; + int fd = pcapng->fd[qidx]; memset(&shb, 0, sizeof(shb)); memset(&idb, 0, sizeof(idb)); @@ -503,17 +530,21 @@ static ssize_t write_fifo(int fd, struct iovec *iov, int iovcnt) return len; } -int _odp_pcapng_write_pkts(pktio_entry_t *entry, int qidx, - const 
odp_packet_t packets[], int num) +int _odp_pcapng_dump_pkts(pktio_entry_t *entry, int qidx, + const odp_packet_t packets[], int num) { + pcapng_entry_t *pcapng = pcapng_entry(entry); int i = 0; struct iovec packet_iov[3 * num]; pcapng_enhanced_packet_block_t epb[num]; int iovcnt = 0; ssize_t block_len = 0; - int fd = entry->pcapng.fd[qidx]; + int fd = pcapng->fd[qidx]; ssize_t len = 0, wlen; + if (odp_likely(pcapng->state[qidx] != PCAPNG_WR_PKT)) + return 0; + for (i = 0; i < num; i++) { odp_packet_hdr_t *pkt_hdr = packet_hdr(packets[i]); uint32_t seg_len; diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c index f90189d05b..172a4d336c 100644 --- a/platform/linux-generic/odp_schedule_scalable.c +++ b/platform/linux-generic/odp_schedule_scalable.c @@ -716,7 +716,7 @@ static void pktio_start(int pktio_idx, _ODP_ASSERT(pktio_idx < ODP_CONFIG_PKTIO_ENTRIES); for (i = 0; i < num_in_queue; i++) { rxq = in_queue_idx[i]; - _ODP_ASSERT(rxq < PKTIO_MAX_QUEUES); + _ODP_ASSERT(rxq < ODP_PKTIN_MAX_QUEUES); __atomic_fetch_add(&global->poll_count[pktio_idx], 1, __ATOMIC_RELAXED); qentry = _odp_qentry_from_ext(odpq[i]); diff --git a/platform/linux-generic/odp_shared_memory.c b/platform/linux-generic/odp_shared_memory.c index 4103568a7e..a0e822a535 100644 --- a/platform/linux-generic/odp_shared_memory.c +++ b/platform/linux-generic/odp_shared_memory.c @@ -124,6 +124,31 @@ int odp_shm_info(odp_shm_t shm, odp_shm_info_t *info) info->size = ishm_info.size; info->page_size = ishm_info.page_size; info->flags = ishm_info.user_flags; + info->num_seg = 1; + + return 0; +} + +int odp_shm_segment_info(odp_shm_t shm, uint32_t index, uint32_t num, + odp_shm_segment_info_t seg_info[]) +{ + odp_shm_info_t info; + + /* No physical memory segment information available */ + if (index != 0 || num != 1) { + _ODP_ERR("Only single segment supported (%u, %u)\n", index, num); + return -1; + } + + if (odp_shm_info(shm, &info)) { + _ODP_ERR("SHM info 
call failed\n"); + return -1; + } + + seg_info[0].addr = (uintptr_t)info.addr; + seg_info[0].iova = ODP_SHM_IOVA_INVALID; + seg_info[0].pa = ODP_SHM_PA_INVALID; + seg_info[0].len = info.size; return 0; } diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c index 90e83a4f2c..05660eb7ab 100644 --- a/platform/linux-generic/odp_timer.c +++ b/platform/linux-generic/odp_timer.c @@ -652,7 +652,7 @@ static odp_event_t timer_cancel(timer_pool_t *tp, uint32_t idx) static inline void timer_expire(timer_pool_t *tp, uint32_t idx, uint64_t tick) { uint64_t exp_tck; - odp_queue_t queue; + odp_queue_t queue = ODP_QUEUE_INVALID; _odp_timer_t *tim = &tp->timers[idx]; tick_buf_t *tb = &tp->tick_buf[idx]; odp_event_t tmo_event = ODP_EVENT_INVALID; diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c index 30e6a0e612..7e9db90ce7 100644 --- a/platform/linux-generic/pktio/dpdk.c +++ b/platform/linux-generic/pktio/dpdk.c @@ -104,7 +104,7 @@ ODP_STATIC_ASSERT(DPDK_MIN_RX_BURST <= UINT8_MAX, "DPDK_MIN_RX_BURST too large") /** DPDK runtime configuration options */ typedef struct { - int num_rx_desc; + int num_rx_desc_default; int num_tx_desc_default; uint8_t multicast_en; uint8_t rx_drop_en; @@ -141,7 +141,7 @@ typedef struct ODP_ALIGNED_CACHE { /* Minimum RX burst size */ uint8_t min_rx_burst; /* Cache for storing extra RX packets */ - pkt_cache_t rx_cache[PKTIO_MAX_QUEUES]; + pkt_cache_t rx_cache[ODP_PKTIN_MAX_QUEUES]; /* --- Control path data --- */ @@ -161,15 +161,17 @@ typedef struct ODP_ALIGNED_CACHE { uint8_t mtu_set; /* Use system call to get/set vdev promisc mode */ uint8_t vdev_sysc_promisc; + /* Number of RX descriptors per queue */ + uint16_t num_rx_desc[ODP_PKTIN_MAX_QUEUES]; /* Number of TX descriptors per queue */ - uint16_t num_tx_desc[PKTIO_MAX_QUEUES]; + uint16_t num_tx_desc[ODP_PKTOUT_MAX_QUEUES]; /* --- Locks for MT safe operations --- */ /* RX queue locks */ - odp_ticketlock_t rx_lock[PKTIO_MAX_QUEUES] 
ODP_ALIGNED_CACHE; + odp_ticketlock_t rx_lock[ODP_PKTIN_MAX_QUEUES] ODP_ALIGNED_CACHE; /* TX queue locks */ - odp_ticketlock_t tx_lock[PKTIO_MAX_QUEUES] ODP_ALIGNED_CACHE; + odp_ticketlock_t tx_lock[ODP_PKTOUT_MAX_QUEUES] ODP_ALIGNED_CACHE; } pkt_dpdk_t; ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_dpdk_t), @@ -227,14 +229,8 @@ static int init_options(pktio_entry_t *pktio_entry, int val; if (!lookup_opt("num_rx_desc", dev_info->driver_name, - &opt->num_rx_desc)) + &opt->num_rx_desc_default)) return -1; - if (opt->num_rx_desc < dev_info->rx_desc_lim.nb_min || - opt->num_rx_desc > dev_info->rx_desc_lim.nb_max || - opt->num_rx_desc % dev_info->rx_desc_lim.nb_align) { - _ODP_ERR("Invalid number of RX descriptors\n"); - return -1; - } if (!lookup_opt("num_tx_desc", dev_info->driver_name, &opt->num_tx_desc_default)) @@ -255,7 +251,7 @@ static int init_options(pktio_entry_t *pktio_entry, _ODP_DBG("DPDK interface (%s): %" PRIu16 "\n", dev_info->driver_name, pkt_priv(pktio_entry)->port_id); _ODP_DBG(" multicast_en: %d\n", opt->multicast_en); - _ODP_DBG(" num_rx_desc: %d\n", opt->num_rx_desc); + _ODP_DBG(" num_rx_desc: %d\n", opt->num_rx_desc_default); _ODP_DBG(" num_tx_desc: %d\n", opt->num_tx_desc_default); _ODP_DBG(" rx_drop_en: %d\n", opt->rx_drop_en); @@ -1264,7 +1260,7 @@ static int dpdk_close(pktio_entry_t *pktio_entry) unsigned i, j; /* Free cache packets */ - for (i = 0; i < PKTIO_MAX_QUEUES; i++) { + for (i = 0; i < ODP_PKTIN_MAX_QUEUES; i++) { idx = pkt_dpdk->rx_cache[i].idx; for (j = 0; j < pkt_dpdk->rx_cache[i].count; j++) @@ -1493,21 +1489,51 @@ static void prepare_rss_conf(pktio_entry_t *pktio_entry, static int dpdk_input_queues_config(pktio_entry_t *pktio_entry, const odp_pktin_queue_param_t *p) { + struct rte_eth_dev_info dev_info; + pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry); odp_pktin_mode_t mode = pktio_entry->param.in_mode; uint8_t lockless; + int ret; prepare_rss_conf(pktio_entry, p); /** * Scheduler synchronizes input queue polls. 
Only single thread * at a time polls a queue */ - if (mode == ODP_PKTIN_MODE_SCHED || - p->op_mode == ODP_PKTIO_OP_MT_UNSAFE) + if (mode == ODP_PKTIN_MODE_SCHED || p->op_mode == ODP_PKTIO_OP_MT_UNSAFE) lockless = 1; else lockless = 0; - pkt_priv(pktio_entry)->flags.lockless_rx = lockless; + pkt_dpdk->flags.lockless_rx = lockless; + + ret = rte_eth_dev_info_get(pkt_dpdk->port_id, &dev_info); + if (ret) { + _ODP_ERR("DPDK: rte_eth_dev_info_get() failed: %d\n", ret); + return -1; + } + + /* Configure RX descriptors */ + for (uint32_t i = 0; i < p->num_queues; i++) { + uint16_t num_rx_desc = pkt_dpdk->opt.num_rx_desc_default; + + if (mode == ODP_PKTIN_MODE_DIRECT && p->queue_size[i] != 0) { + num_rx_desc = p->queue_size[i]; + /* Make sure correct alignment is used */ + if (dev_info.rx_desc_lim.nb_align) + num_rx_desc = RTE_ALIGN_MUL_CEIL(num_rx_desc, + dev_info.rx_desc_lim.nb_align); + } + + if (num_rx_desc < dev_info.rx_desc_lim.nb_min || + num_rx_desc > dev_info.rx_desc_lim.nb_max || + num_rx_desc % dev_info.rx_desc_lim.nb_align) { + _ODP_ERR("DPDK: invalid number of RX descriptors (%" PRIu16 ") for queue " + "%" PRIu32 "\n", num_rx_desc, i); + return -1; + } + pkt_dpdk->num_rx_desc[i] = num_rx_desc; + } return 0; } @@ -1548,7 +1574,8 @@ static int dpdk_output_queues_config(pktio_entry_t *pktio_entry, if (num_tx_desc < dev_info.tx_desc_lim.nb_min || num_tx_desc > dev_info.tx_desc_lim.nb_max || num_tx_desc % dev_info.tx_desc_lim.nb_align) { - _ODP_ERR("DPDK: invalid number of TX descriptors\n"); + _ODP_ERR("DPDK: invalid number of TX descriptors (%" PRIu16 ") for queue " + "%" PRIu32 "\n", num_tx_desc, i); return -1; } pkt_dpdk->num_tx_desc[i] = num_tx_desc; @@ -1572,7 +1599,9 @@ static int dpdk_init_capability(pktio_entry_t *pktio_entry, memset(capa, 0, sizeof(odp_pktio_capability_t)); capa->max_input_queues = RTE_MIN(dev_info->max_rx_queues, - PKTIO_MAX_QUEUES); + ODP_PKTIN_MAX_QUEUES); + capa->min_input_queue_size = dev_info->rx_desc_lim.nb_min; + 
capa->max_input_queue_size = dev_info->rx_desc_lim.nb_max; /* ixgbe devices support only 16 rx queues in RSS mode */ if (!strncmp(dev_info->driver_name, IXGBE_DRV_NAME, @@ -1581,7 +1610,7 @@ static int dpdk_init_capability(pktio_entry_t *pktio_entry, capa->max_input_queues); capa->max_output_queues = RTE_MIN(dev_info->max_tx_queues, - PKTIO_MAX_QUEUES); + ODP_PKTOUT_MAX_QUEUES); capa->min_output_queue_size = dev_info->tx_desc_lim.nb_min; capa->max_output_queue_size = dev_info->tx_desc_lim.nb_max; @@ -1845,10 +1874,11 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED, return -1; } - for (i = 0; i < PKTIO_MAX_QUEUES; i++) { + for (i = 0; i < ODP_PKTIN_MAX_QUEUES; i++) odp_ticketlock_init(&pkt_dpdk->rx_lock[i]); + + for (i = 0; i < ODP_PKTOUT_MAX_QUEUES; i++) odp_ticketlock_init(&pkt_dpdk->tx_lock[i]); - } rte_eth_stats_reset(pkt_dpdk->port_id); @@ -1903,7 +1933,7 @@ static int dpdk_setup_eth_rx(const pktio_entry_t *pktio_entry, for (i = 0; i < pktio_entry->num_in_queue; i++) { ret = rte_eth_rx_queue_setup(port_id, i, - pkt_dpdk->opt.num_rx_desc, + pkt_dpdk->num_rx_desc[i], rte_eth_dev_socket_id(port_id), &rxconf, pkt_dpdk->pkt_pool); if (ret < 0) { diff --git a/platform/linux-generic/pktio/loop.c b/platform/linux-generic/pktio/loop.c index 947edd4766..b30535f22c 100644 --- a/platform/linux-generic/pktio/loop.c +++ b/platform/linux-generic/pktio/loop.c @@ -44,12 +44,15 @@ #define LOOP_MTU_MIN 68 #define LOOP_MTU_MAX UINT16_MAX -#define LOOP_MAX_TX_QUEUE_SIZE 1024 +#define LOOP_MAX_QUEUE_SIZE 1024 typedef struct { odp_queue_t loopq; /**< loopback queue for "loop" device */ + uint32_t pktin_queue_size; /**< input queue size */ + uint32_t pktout_queue_size; /**< output queue size */ uint16_t mtu; /**< link MTU */ uint8_t idx; /**< index of "loop" device */ + uint8_t queue_create; /**< create or re-create queue during start */ } pkt_loop_t; ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_loop_t), @@ -84,9 +87,11 @@ static int loopback_open(odp_pktio_t id ODP_UNUSED, 
pktio_entry_t *pktio_entry, return -1; } + memset(pkt_loop, 0, sizeof(pkt_loop_t)); pkt_loop->idx = idx; pkt_loop->mtu = LOOP_MTU_MAX; pkt_loop->loopq = ODP_QUEUE_INVALID; + pkt_loop->queue_create = 1; loopback_stats_reset(pktio_entry); loopback_init_capability(pktio_entry); @@ -112,19 +117,23 @@ static int loopback_queue_destroy(odp_queue_t queue) return 0; } -static int loopback_pktout_queue_config(pktio_entry_t *pktio_entry, - const odp_pktout_queue_param_t *param) +static int loopback_start(pktio_entry_t *pktio_entry) { pkt_loop_t *pkt_loop = pkt_priv(pktio_entry); odp_queue_param_t queue_param; char queue_name[ODP_QUEUE_NAME_LEN]; + /* Re-create queue only when necessary */ + if (!pkt_loop->queue_create) + return 0; + /* Destroy old queue */ if (pkt_loop->loopq != ODP_QUEUE_INVALID && loopback_queue_destroy(pkt_loop->loopq)) return -1; odp_queue_param_init(&queue_param); - queue_param.size = param->queue_size[0]; + queue_param.size = pkt_loop->pktin_queue_size > pkt_loop->pktout_queue_size ? 
+ pkt_loop->pktin_queue_size : pkt_loop->pktout_queue_size; snprintf(queue_name, sizeof(queue_name), "_odp_pktio_loopq-%" PRIu64 "", odp_pktio_to_u64(pktio_entry->handle)); @@ -134,6 +143,31 @@ static int loopback_pktout_queue_config(pktio_entry_t *pktio_entry, _ODP_ERR("Creating loopback pktio queue failed\n"); return -1; } + pkt_loop->queue_create = 0; + + return 0; +} + +static int loopback_pktin_queue_config(pktio_entry_t *pktio_entry, + const odp_pktin_queue_param_t *param) +{ + pkt_loop_t *pkt_loop = pkt_priv(pktio_entry); + + if (pktio_entry->param.in_mode == ODP_PKTIN_MODE_DIRECT) { + pkt_loop->pktin_queue_size = param->queue_size[0]; + pkt_loop->queue_create = 1; + } + + return 0; +} + +static int loopback_pktout_queue_config(pktio_entry_t *pktio_entry, + const odp_pktout_queue_param_t *param) +{ + pkt_loop_t *pkt_loop = pkt_priv(pktio_entry); + + pkt_loop->pktout_queue_size = param->queue_size[0]; + pkt_loop->queue_create = 1; return 0; } @@ -485,10 +519,15 @@ static int loopback_init_capability(pktio_entry_t *pktio_entry) capa->maxlen.min_output = LOOP_MTU_MIN; capa->maxlen.max_output = LOOP_MTU_MAX; + capa->min_input_queue_size = 1; + capa->max_input_queue_size = queue_capa.plain.max_size; + if (capa->max_input_queue_size == 0) + capa->max_input_queue_size = LOOP_MAX_QUEUE_SIZE; + capa->min_output_queue_size = 1; capa->max_output_queue_size = queue_capa.plain.max_size; if (capa->max_output_queue_size == 0) - capa->max_output_queue_size = LOOP_MAX_TX_QUEUE_SIZE; + capa->max_output_queue_size = LOOP_MAX_QUEUE_SIZE; odp_pktio_config_init(&capa->config); capa->config.enable_loop = 1; @@ -591,7 +630,7 @@ const pktio_if_ops_t _odp_loopback_pktio_ops = { .term = NULL, .open = loopback_open, .close = loopback_close, - .start = NULL, + .start = loopback_start, .stop = NULL, .stats = loopback_stats, .stats_reset = loopback_stats_reset, @@ -612,6 +651,6 @@ const pktio_if_ops_t _odp_loopback_pktio_ops = { .pktio_ts_from_ns = NULL, .pktio_time = NULL, .config = 
NULL, - .input_queues_config = NULL, + .input_queues_config = loopback_pktin_queue_config, .output_queues_config = loopback_pktout_queue_config, }; diff --git a/platform/linux-generic/pktio/netmap.c b/platform/linux-generic/pktio/netmap.c index 41ded2ae13..0352e33f77 100644 --- a/platform/linux-generic/pktio/netmap.c +++ b/platform/linux-generic/pktio/netmap.c @@ -95,9 +95,9 @@ typedef struct { odp_bool_t lockless_rx; /**< no locking for rx */ odp_bool_t lockless_tx; /**< no locking for tx */ /** mapping of pktin queues to netmap rx descriptors */ - netmap_ring_t rx_desc_ring[PKTIO_MAX_QUEUES]; + netmap_ring_t rx_desc_ring[ODP_PKTIN_MAX_QUEUES]; /** mapping of pktout queues to netmap tx descriptors */ - netmap_ring_t tx_desc_ring[PKTIO_MAX_QUEUES]; + netmap_ring_t tx_desc_ring[ODP_PKTOUT_MAX_QUEUES]; netmap_opt_t opt; /**< options */ } pkt_netmap_t; @@ -314,13 +314,15 @@ static inline void netmap_close_descriptors(pktio_entry_t *pktio_entry) int i, j; pkt_netmap_t *pkt_nm = pkt_priv(pktio_entry); - for (i = 0; i < PKTIO_MAX_QUEUES; i++) { + for (i = 0; i < ODP_PKTIN_MAX_QUEUES; i++) { for (j = 0; j < NM_MAX_DESC; j++) { if (pkt_nm->rx_desc_ring[i].desc[j] != NULL) { nm_close(pkt_nm->rx_desc_ring[i].desc[j]); pkt_nm->rx_desc_ring[i].desc[j] = NULL; } } + } + for (i = 0; i < ODP_PKTOUT_MAX_QUEUES; i++) { for (j = 0; j < NM_MAX_DESC; j++) { if (pkt_nm->tx_desc_ring[i].desc[j] != NULL) { nm_close(pkt_nm->tx_desc_ring[i].desc[j]); @@ -422,8 +424,8 @@ static void netmap_init_capability(pktio_entry_t *pktio_entry) memset(capa, 0, sizeof(odp_pktio_capability_t)); - capa->max_input_queues = PKTIO_MAX_QUEUES; - if (pkt_nm->num_rx_rings < PKTIO_MAX_QUEUES) + capa->max_input_queues = ODP_PKTIN_MAX_QUEUES; + if (pkt_nm->num_rx_rings < ODP_PKTIN_MAX_QUEUES) capa->max_input_queues = pkt_nm->num_rx_rings; if (capa->max_input_queues > NM_MAX_DESC) { /* Have to use a single descriptor to fetch packets from all @@ -434,8 +436,8 @@ static void netmap_init_capability(pktio_entry_t 
*pktio_entry) NM_MAX_DESC, capa->max_input_queues); } - capa->max_output_queues = PKTIO_MAX_QUEUES; - if (pkt_nm->num_tx_rings < PKTIO_MAX_QUEUES) + capa->max_output_queues = ODP_PKTOUT_MAX_QUEUES; + if (pkt_nm->num_tx_rings < ODP_PKTOUT_MAX_QUEUES) capa->max_output_queues = pkt_nm->num_tx_rings; if (capa->max_output_queues > NM_MAX_DESC) { capa->max_output_queues = NM_MAX_DESC; @@ -578,10 +580,11 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry, nm_close(desc); - for (i = 0; i < PKTIO_MAX_QUEUES; i++) { + for (i = 0; i < ODP_PKTIN_MAX_QUEUES; i++) odp_ticketlock_init(&pkt_nm->rx_desc_ring[i].lock); + + for (i = 0; i < ODP_PKTOUT_MAX_QUEUES; i++) odp_ticketlock_init(&pkt_nm->tx_desc_ring[i].lock); - } if (pkt_nm->is_virtual) { static unsigned int mac; diff --git a/platform/linux-generic/pktio/null.c b/platform/linux-generic/pktio/null.c index 0bd33f5170..b067a68eca 100644 --- a/platform/linux-generic/pktio/null.c +++ b/platform/linux-generic/pktio/null.c @@ -130,8 +130,8 @@ static int null_capability(pktio_entry_t *pktio_entry ODP_UNUSED, { memset(capa, 0, sizeof(odp_pktio_capability_t)); - capa->max_input_queues = PKTIO_MAX_QUEUES; - capa->max_output_queues = PKTIO_MAX_QUEUES; + capa->max_input_queues = ODP_PKTIN_MAX_QUEUES; + capa->max_output_queues = ODP_PKTOUT_MAX_QUEUES; capa->set_op.op.promisc_mode = 0; odp_pktio_config_init(&capa->config); diff --git a/platform/linux-generic/pktio/socket_xdp.c b/platform/linux-generic/pktio/socket_xdp.c index b10fc9fce4..be79ca267a 100644 --- a/platform/linux-generic/pktio/socket_xdp.c +++ b/platform/linux-generic/pktio/socket_xdp.c @@ -94,7 +94,7 @@ typedef struct { } xdp_umem_info_t; typedef struct { - xdp_sock_t qs[PKTIO_MAX_QUEUES]; + xdp_sock_t qs[ODP_PKTOUT_MAX_QUEUES]; xdp_umem_info_t *umem_info; uint32_t num_q; int pktio_idx; @@ -275,7 +275,7 @@ static int sock_xdp_open(odp_pktio_t pktio, pktio_entry_t *pktio_entry, const ch priv->max_mtu = pool->seg_len; - for (int i = 0; i < 
PKTIO_MAX_QUEUES; ++i) { + for (int i = 0; i < ODP_PKTOUT_MAX_QUEUES; ++i) { odp_ticketlock_init(&priv->qs[i].rx_lock); odp_ticketlock_init(&priv->qs[i].tx_lock); } @@ -852,8 +852,8 @@ static int set_queue_capability(int fd, const char *devname, odp_pktio_capabilit channels.max_combined = 1U; } - max_channels = _ODP_MIN((uint32_t)PKTIO_MAX_QUEUES, channels.max_combined); - capa->max_input_queues = max_channels; + max_channels = _ODP_MIN((uint32_t)ODP_PKTOUT_MAX_QUEUES, channels.max_combined); + capa->max_input_queues = _ODP_MIN((uint32_t)ODP_PKTIN_MAX_QUEUES, max_channels); capa->max_output_queues = max_channels; return 0; diff --git a/scripts/ci/build_static_x86_64.sh b/scripts/ci/build_static.sh similarity index 100% rename from scripts/ci/build_static_x86_64.sh rename to scripts/ci/build_static.sh diff --git a/test/common/odp_cunit_common.c b/test/common/odp_cunit_common.c index 5ce7fd7918..0f3b45b18e 100644 --- a/test/common/odp_cunit_common.c +++ b/test/common/odp_cunit_common.c @@ -36,6 +36,7 @@ static int allow_skip_result; static odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX]; static int threads_running; static odp_instance_t instance; +static bool control_thread; static char *progname; static int (*thread_func)(void *); @@ -260,6 +261,7 @@ static int tests_global_init(odp_instance_t *inst) { odp_init_t init_param; odph_helper_options_t helper_options; + odp_thread_type_t thr_type; if (odph_options(&helper_options)) { fprintf(stderr, "error: odph_options() failed.\n"); @@ -273,7 +275,9 @@ static int tests_global_init(odp_instance_t *inst) fprintf(stderr, "error: odp_init_global() failed.\n"); return -1; } - if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) { + + thr_type = control_thread ? 
ODP_THREAD_CONTROL : ODP_THREAD_WORKER; + if (0 != odp_init_local(*inst, thr_type)) { fprintf(stderr, "error: odp_init_local() failed.\n"); return -1; } @@ -706,10 +710,14 @@ int odp_cunit_register(odp_suiteinfo_t testsuites[]) */ int odp_cunit_parse_options(int argc, char *argv[]) { + const char *ctrl_thread_env = getenv("CI_THREAD_TYPE_CONTROL"); const char *env = getenv("CI"); progname = argv[0]; odph_parse_options(argc, argv); + /* Check if we need to use control thread */ + if (ctrl_thread_env && !strcmp(ctrl_thread_env, "true")) + control_thread = true; if (env && !strcmp(env, "true")) { allow_skip_result = 1; diff --git a/test/performance/.gitignore b/test/performance/.gitignore index 3a32d04202..ec1915bba7 100644 --- a/test/performance/.gitignore +++ b/test/performance/.gitignore @@ -23,4 +23,5 @@ odp_sched_latency odp_sched_perf odp_sched_pktio odp_scheduling +odp_stress odp_timer_perf diff --git a/test/performance/Makefile.am b/test/performance/Makefile.am index b0885808ed..336a46d22c 100644 --- a/test/performance/Makefile.am +++ b/test/performance/Makefile.am @@ -25,6 +25,7 @@ COMPILE_ONLY = odp_cpu_bench \ odp_sched_perf \ odp_sched_pktio \ odp_scheduling \ + odp_stress \ odp_timer_perf TESTSCRIPTS = odp_cpu_bench_run.sh \ @@ -72,6 +73,7 @@ odp_pool_perf_SOURCES = odp_pool_perf.c odp_queue_perf_SOURCES = odp_queue_perf.c odp_random_SOURCES = odp_random.c odp_sched_perf_SOURCES = odp_sched_perf.c +odp_stress_SOURCES = odp_stress.c odp_timer_perf_SOURCES = odp_timer_perf.c # l2fwd test depends on generator example diff --git a/test/performance/odp_cpu_bench.c b/test/performance/odp_cpu_bench.c index 41a26491f8..e912e167d9 100644 --- a/test/performance/odp_cpu_bench.c +++ b/test/performance/odp_cpu_bench.c @@ -26,7 +26,11 @@ /* Default number of entries in the test lookup table */ #define DEF_LOOKUP_TBL_SIZE (1024 * 1024) -#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1) +#define MAX_WORKERS \ + (((ODP_THREAD_COUNT_MAX - 1) > (MAX_GROUPS * QUEUES_PER_GROUP)) 
? \ + (MAX_GROUPS * QUEUES_PER_GROUP) : \ + (ODP_THREAD_COUNT_MAX - 1)) + ODP_STATIC_ASSERT(MAX_WORKERS <= MAX_GROUPS * QUEUES_PER_GROUP, "Not enough queues for all workers"); diff --git a/test/performance/odp_crypto.c b/test/performance/odp_crypto.c index 94d9cc6cfd..b381fde314 100644 --- a/test/performance/odp_crypto.c +++ b/test/performance/odp_crypto.c @@ -1197,6 +1197,7 @@ int main(int argc, char *argv[]) odp_init_local(instance, ODP_THREAD_WORKER); odp_sys_info_print(); + memset(&crypto_capa, 0, sizeof(crypto_capa)); if (odp_crypto_capability(&crypto_capa)) { ODPH_ERR("Crypto capability request failed.\n"); diff --git a/test/performance/odp_dma_perf.c b/test/performance/odp_dma_perf.c index 6151072993..62899f913c 100644 --- a/test/performance/odp_dma_perf.c +++ b/test/performance/odp_dma_perf.c @@ -21,6 +21,7 @@ #define DEFAULT_SEG_SIZE 1024U #define ROUNDS 1000000 +#define DEFAULT_WAIT_NS ODP_TIME_SEC_IN_NS #define COMPL_DELIMITER "," /* For now, a static maximum amount of input segments */ #define MAX_NUM_IN_SEGS 64 @@ -43,6 +44,8 @@ #define MEGAS 1000000 #define KILOS 1000 +#define RETRIES 1000U + typedef struct test_config_t { int trs_type; int trs_grn; @@ -51,6 +54,7 @@ typedef struct test_config_t { int seg_type; int num_rounds; int dma_rounds; + uint64_t wait_ns; struct { int num_modes; @@ -104,6 +108,7 @@ static void set_option_defaults(test_config_t *config) config->num_in_seg = 1; config->seg_size = DEFAULT_SEG_SIZE; config->num_rounds = ROUNDS; + config->wait_ns = DEFAULT_WAIT_NS; config->compl_modes.compl_mask = ODP_DMA_COMPL_SYNC; } @@ -172,6 +177,8 @@ static void print_usage(void) " 1: event\n" " -r, --num_rounds Number of times to run the test scenario. 
%d by\n" " default.\n" + " -w, --wait_nsec Number of nanoseconds to wait for completion events.\n" + " 1 second (1000000000) by default.\n" " -h, --help This help.\n" "\n", MAX_NUM_IN_SEGS, ROUNDS); @@ -254,11 +261,12 @@ static int parse_options(int argc, char **argv, test_config_t *config) { "in_seg_type", required_argument, NULL, 'T' }, { "compl_modes", required_argument, NULL, 'm' }, { "num_rounds", required_argument, NULL, 'r' }, + { "wait_nsec", required_argument, NULL, 'w' }, { "help", no_argument, NULL, 'h' }, { NULL, 0, NULL, 0 } }; - static const char *shortopts = "t:g:i:s:T:m:r:h"; + static const char *shortopts = "t:g:i:s:T:m:r:w:h"; set_option_defaults(config); @@ -290,6 +298,9 @@ static int parse_options(int argc, char **argv, test_config_t *config) case 'r': config->num_rounds = atoi(optarg); break; + case 'w': + config->wait_ns = atoll(optarg); + break; case 'h': default: print_usage(); @@ -668,7 +679,7 @@ static void print_humanised_speed(uint64_t speed) printf("%" PRIu64 " B/s\n", speed); } -static void print_results(const test_config_t *config, uint64_t time) +static void print_results(const test_config_t *config, uint64_t time, uint32_t retries) { const int is_sync = config->trs_type == TRS_TYPE_SYNC; const uint64_t avg_time = time / config->num_rounds; @@ -705,6 +716,7 @@ static void print_results(const test_config_t *config, uint64_t time) " average transfer speed: ", config->num_rounds, avg_time); print_humanised_speed(avg_speed); + printf(" retries with usec sleep: %u\n", retries); printf("\n=============================================\n"); } @@ -713,7 +725,8 @@ static int run_dma_sync(test_config_t *config) odp_dma_transfer_param_t trs_params[config->dma_rounds]; uint32_t trs_lengths[config->dma_rounds]; odp_time_t start, end; - uint32_t num_rounds = config->num_rounds, offset; + uint32_t num_rounds = config->num_rounds, offset, retries = 0U; + int done = 0; config->test_case_api.trs_base_fn(config, trs_params, trs_lengths); start = 
odp_time_local_strict(); @@ -724,8 +737,18 @@ static int run_dma_sync(test_config_t *config) for (int i = 0; i < config->dma_rounds; ++i) { config->test_case_api.trs_dyn_fn(config, offset, trs_lengths[i]); - if (odp_dma_transfer(config->dma_config.handle, &trs_params[i], NULL) - <= 0) { + while (1) { + done = odp_dma_transfer(config->dma_config.handle, &trs_params[i], + NULL); + + if (done > 0) + break; + + if (done == 0 && retries++ < RETRIES) { + odp_time_wait_ns(1000U); + continue; + } + ODPH_ERR("Error starting a sync DMA transfer.\n"); return -1; } @@ -735,7 +758,7 @@ static int run_dma_sync(test_config_t *config) } end = odp_time_local_strict(); - print_results(config, odp_time_diff_ns(end, start)); + print_results(config, odp_time_diff_ns(end, start), retries); return 0; } @@ -854,7 +877,8 @@ static void build_wait_list(const test_config_t *config, odp_dma_compl_param_t c static inline int wait_dma_transfers_ready(test_config_t *config, compl_wait_entry_t list[]) { odp_event_t ev; - const uint64_t wait_time = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS * 5U); + const uint64_t wait_time = odp_schedule_wait_time(config->wait_ns); + uint64_t start, end; int done = 0; for (int i = 0; i < config->dma_rounds; ++i) { @@ -866,6 +890,9 @@ static inline int wait_dma_transfers_ready(test_config_t *config, compl_wait_ent return -1; } } else { + start = odp_time_local_ns(); + end = start + ODP_TIME_SEC_IN_NS; + while (1) { done = odp_dma_transfer_done(config->dma_config.handle, list[i].id, NULL); @@ -873,7 +900,7 @@ static inline int wait_dma_transfers_ready(test_config_t *config, compl_wait_ent if (done > 0) break; - if (done == 0) + if (done == 0 && odp_time_local_ns() < end) continue; ODPH_ERR("Error waiting poll completion.\n"); @@ -907,10 +934,10 @@ static int run_dma_async_transfer(test_config_t *config) odp_dma_transfer_param_t trs_params[config->dma_rounds]; uint32_t trs_lengths[config->dma_rounds]; odp_dma_compl_param_t compl_params[config->dma_rounds]; - int ret 
= 0; + int ret = 0, started; compl_wait_entry_t compl_wait_list[config->dma_rounds]; odp_time_t start, end; - uint32_t num_rounds = config->num_rounds, offset; + uint32_t num_rounds = config->num_rounds, offset, retries = 0U; config->test_case_api.trs_base_fn(config, trs_params, trs_lengths); @@ -928,8 +955,18 @@ static int run_dma_async_transfer(test_config_t *config) for (int i = 0; i < config->dma_rounds; ++i) { config->test_case_api.trs_dyn_fn(config, offset, trs_lengths[i]); - if (odp_dma_transfer_start(config->dma_config.handle, &trs_params[i], - &compl_params[i]) <= 0) { + while (1) { + started = odp_dma_transfer_start(config->dma_config.handle, + &trs_params[i], &compl_params[i]); + + if (started > 0) + break; + + if (started == 0 && retries++ < RETRIES) { + odp_time_wait_ns(1000U); + continue; + } + ODPH_ERR("Error starting an async DMA transfer.\n"); ret = -1; goto out_trs_ids; @@ -946,7 +983,7 @@ static int run_dma_async_transfer(test_config_t *config) } end = odp_time_local_strict(); - print_results(config, odp_time_diff_ns(end, start)); + print_results(config, odp_time_diff_ns(end, start), retries); out_compl_evs: free_dma_completion_events(config, compl_params); diff --git a/test/performance/odp_ipsecfwd.c b/test/performance/odp_ipsecfwd.c index b917a976e3..6098fd9649 100644 --- a/test/performance/odp_ipsecfwd.c +++ b/test/performance/odp_ipsecfwd.c @@ -71,11 +71,13 @@ typedef struct pktio_s { odp_queue_t out_ev_qs[MAX_QUEUES]; }; + odp_pktin_queue_t in_dir_qs[MAX_QUEUES]; odph_ethaddr_t src_mac; char *name; odp_pktio_t handle; - odp_bool_t (*send_fn)(const pktio_t *pktio, uint8_t index, odp_packet_t pkt); + uint32_t (*send_fn)(const pktio_t *pktio, uint8_t index, odp_packet_t pkts[], int num); uint32_t num_tx_qs; + uint8_t idx; } pktio_t; typedef struct { @@ -99,8 +101,21 @@ typedef struct prog_config_s prog_config_t; typedef struct ODP_ALIGNED_CACHE { stats_t stats; prog_config_t *prog_config; + int thr_idx; + uint8_t pktio; } thread_config_t; 
+typedef uint32_t (*rx_fn_t)(thread_config_t *config, odp_event_t evs[], int num); +typedef void (*ipsec_fn_t)(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, stats_t *stats); +typedef void (*drain_fn_t)(prog_config_t *config); + +typedef struct { + rx_fn_t rx; + ipsec_fn_t proc; + ipsec_fn_t compl; + drain_fn_t drain; +} ops_t; + typedef struct prog_config_s { odph_thread_t thread_tbl[MAX_WORKERS]; thread_config_t thread_config[MAX_WORKERS]; @@ -108,6 +123,7 @@ typedef struct prog_config_s { fwd_entry_t fwd_entries[MAX_FWDS]; odp_queue_t sa_qs[MAX_SA_QUEUES]; pktio_t pktios[MAX_IFS]; + ops_t ops; char *sa_conf_file; char *fwd_conf_file; odp_instance_t odp_instance; @@ -125,6 +141,8 @@ typedef struct prog_config_s { uint32_t num_sas; uint32_t num_fwds; int num_thrs; + odp_bool_t is_dir_rx; + odp_bool_t is_hashed_tx; uint8_t mode; } prog_config_t; @@ -134,6 +152,23 @@ typedef struct { int type; } exposed_alg_t; +typedef struct { + odp_packet_t pkts[MAX_BURST]; + const pktio_t *pktio; + uint32_t num; +} pkt_vec_t; + +typedef struct { + pkt_vec_t vecs[MAX_QUEUES]; + uint8_t num_qs; +} pkt_out_t; + +typedef struct { + pkt_out_t ifs[MAX_IFS]; + odp_bool_t is_hashed_tx; + uint8_t q_idx; +} pkt_ifs_t; + static exposed_alg_t exposed_algs[] = { ALG_ENTRY(ODP_CIPHER_ALG_NULL, CIPHER_TYPE), ALG_ENTRY(ODP_CIPHER_ALG_DES, CIPHER_TYPE), @@ -163,6 +198,7 @@ static exposed_alg_t exposed_algs[] = { static odp_ipsec_sa_t *spi_to_sa_map[2U][MAX_SPIS]; static odp_atomic_u32_t is_running; static const int ipsec_out_mark; +static __thread pkt_ifs_t ifs; static void init_config(prog_config_t *config) { @@ -329,230 +365,741 @@ static void print_usage(void) " -I, --num_input_qs Input queue count. 1 by default.\n" " -S, --num_sa_qs SA queue count. 1 by default.\n" " -O, --num_output_qs Output queue count. 1 by default.\n" + " -d, --direct_rx Use direct RX. Interfaces will be polled by workers\n" + " directly. 
\"--mode\", \"--num_input_qs\" and\n" + " \"--num_output_qs\" options are ignored, input and output\n" + " queue counts will match worker count.\n" " -h, --help This help.\n" "\n"); } -static odp_bool_t setup_ipsec(prog_config_t *config) +static inline odp_ipsec_sa_t *get_in_sa(odp_packet_t pkt) { - odp_queue_param_t q_param; - odp_ipsec_config_t ipsec_config; - char q_name[ODP_QUEUE_NAME_LEN]; - - snprintf(q_name, sizeof(q_name), SHORT_PROG_NAME "_sa_status"); - odp_queue_param_init(&q_param); - q_param.type = ODP_QUEUE_TYPE_SCHED; - q_param.sched.prio = odp_schedule_default_prio(); - q_param.sched.sync = ODP_SCHED_SYNC_PARALLEL; - q_param.sched.group = ODP_SCHED_GROUP_ALL; - config->compl_q = odp_queue_create(q_name, &q_param); + odph_esphdr_t esp; + uint32_t spi; - if (config->compl_q == ODP_QUEUE_INVALID) { - ODPH_ERR("Error creating IPsec completion queue\n"); - return false; - } + if (!odp_packet_has_ipsec(pkt)) + return NULL; - odp_ipsec_config_init(&ipsec_config); - ipsec_config.inbound_mode = ODP_IPSEC_OP_MODE_ASYNC; - ipsec_config.outbound_mode = ODP_IPSEC_OP_MODE_ASYNC; - ipsec_config.inbound.default_queue = config->compl_q; - /* For tunnel to tunnel, we need to parse up to this to check the UDP port for SA. */ - ipsec_config.inbound.parse_level = ODP_PROTO_LAYER_L4; + if (odp_packet_copy_to_mem(pkt, odp_packet_l4_offset(pkt), ODPH_ESPHDR_LEN, &esp) < 0) + return NULL; - if (odp_ipsec_config(&ipsec_config) < 0) { - ODPH_ERR("Error configuring IPsec\n"); - return false; - } + spi = odp_be_to_cpu_32(esp.spi); - return true; + return spi <= UINT16_MAX ? 
spi_to_sa_map[DIR_IN][spi] : NULL; } -static odp_bool_t create_sa_dest_queues(odp_ipsec_capability_t *ipsec_capa, - prog_config_t *config) +static inline int process_ipsec_in_enq(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num) { - odp_queue_param_t q_param; - const uint32_t max_sa_qs = MIN(MAX_SA_QUEUES, ipsec_capa->max_queues); + odp_ipsec_in_param_t param; + int left, sent = 0, ret; - if (config->num_sa_qs == 0U || config->num_sa_qs > max_sa_qs) { - ODPH_ERR("Invalid number of SA queues: %u (min: 1, max: %u)\n", config->num_sa_qs, - max_sa_qs); - config->num_sa_qs = 0U; - return false; - } + memset(¶m, 0, sizeof(param)); + /* IPsec in/out need to be identified somehow, so use user_ptr for this. */ + for (int i = 0; i < num; ++i) + odp_packet_user_ptr_set(pkts[i], NULL); - for (uint32_t i = 0U; i < config->num_sa_qs; ++i) { - char q_name[ODP_QUEUE_NAME_LEN]; + while (sent < num) { + left = num - sent; + param.num_sa = left; + param.sa = &sas[sent]; + ret = odp_ipsec_in_enq(&pkts[sent], left, ¶m); - snprintf(q_name, sizeof(q_name), SHORT_PROG_NAME "_sa_compl_%u", i); - odp_queue_param_init(&q_param); - q_param.type = ODP_QUEUE_TYPE_SCHED; - q_param.sched.prio = odp_schedule_max_prio(); - q_param.sched.sync = config->mode == ORDERED ? 
ODP_SCHED_SYNC_ORDERED : - ODP_SCHED_SYNC_PARALLEL; - q_param.sched.group = ODP_SCHED_GROUP_ALL; - config->sa_qs[i] = odp_queue_create(q_name, &q_param); + if (odp_unlikely(ret <= 0)) + break; - if (config->sa_qs[i] == ODP_QUEUE_INVALID) { - ODPH_ERR("Error creating SA destination queue (created count: %u)\n", i); - config->num_sa_qs = i; - return false; - } + sent += ret; } - return true; + return sent; } -static void create_sa_entry(uint32_t dir, uint32_t spi, const char *src_ip_str, - const char *dst_ip_str, int cipher_idx, uint8_t *cipher_key, - uint8_t *cipher_key_extra, int auth_idx, uint8_t *auth_key, - uint8_t *auth_key_extra, uint32_t icv_len, uint32_t ar_ws, - uint32_t max_num_sa, prog_config_t *config) +static inline odp_ipsec_sa_t *get_out_sa(odp_packet_t pkt) { - uint32_t src_ip, dst_ip; - odp_ipsec_sa_param_t sa_param; - odp_ipsec_crypto_param_t crypto_param; - odp_ipsec_sa_t sa; + odph_udphdr_t udp; + uint16_t dst_port; - if (config->num_sas == max_num_sa) { - ODPH_ERR("Maximum number of SAs parsed (%u), ignoring rest\n", max_num_sa); - return; - } + if (!odp_packet_has_udp(pkt)) + return NULL; - if (odph_ipv4_addr_parse(&src_ip, src_ip_str) < 0 || - odph_ipv4_addr_parse(&dst_ip, dst_ip_str) < 0) { - ODPH_ERR("Error parsing IP addresses for SA %u\n", spi); - return; - } + if (odp_packet_copy_to_mem(pkt, odp_packet_l4_offset(pkt), ODPH_UDPHDR_LEN, &udp) < 0) + return NULL; - if (spi > UINT16_MAX) { - ODPH_ERR("Unsupported SPI value for SA %u (> %u)\n", spi, UINT16_MAX); - return; - } + dst_port = odp_be_to_cpu_16(udp.dst_port); - if (spi_to_sa_map[dir][spi] != NULL) { - ODPH_ERR("Non-unique SPIs not supported for SA %u\n", spi); - return; - } + return dst_port ? 
spi_to_sa_map[DIR_OUT][dst_port] : NULL; +} - src_ip = odp_cpu_to_be_32(src_ip); - dst_ip = odp_cpu_to_be_32(dst_ip); - odp_ipsec_sa_param_init(&sa_param); - sa_param.proto = ODP_IPSEC_ESP; - sa_param.mode = ODP_IPSEC_MODE_TUNNEL; - sa_param.spi = spi; - sa_param.dest_queue = config->sa_qs[config->num_sas % config->num_sa_qs]; +static inline int process_ipsec_out_enq(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num) +{ + odp_ipsec_out_param_t param; + int left, sent = 0, ret; - if (dir > 0U) { - sa_param.dir = ODP_IPSEC_DIR_OUTBOUND; - sa_param.outbound.tunnel.ipv4.src_addr = &src_ip; - sa_param.outbound.tunnel.ipv4.dst_addr = &dst_ip; - } else { - sa_param.dir = ODP_IPSEC_DIR_INBOUND; - sa_param.inbound.lookup_mode = ODP_IPSEC_LOOKUP_DISABLED; - sa_param.inbound.antireplay_ws = ar_ws; - } + memset(¶m, 0, sizeof(param)); + /* IPsec in/out need to be identified somehow, so use user_ptr for this. */ + for (int i = 0; i < num; ++i) + odp_packet_user_ptr_set(pkts[i], &ipsec_out_mark); - crypto_param.cipher_alg = cipher_idx; - crypto_param.cipher_key.data = cipher_key; - crypto_param.cipher_key.length = strlen((const char *)cipher_key); - crypto_param.cipher_key_extra.data = cipher_key_extra; - crypto_param.cipher_key_extra.length = strlen((const char *)cipher_key_extra); - crypto_param.auth_alg = auth_idx; - crypto_param.auth_key.data = auth_key; - crypto_param.auth_key.length = strlen((const char *)auth_key); - crypto_param.auth_key_extra.data = auth_key_extra; - crypto_param.auth_key_extra.length = strlen((const char *)auth_key_extra); - crypto_param.icv_len = icv_len; - sa_param.crypto = crypto_param; - sa = odp_ipsec_sa_create(&sa_param); + while (sent < num) { + left = num - sent; + param.num_sa = left; + param.sa = &sas[sent]; + ret = odp_ipsec_out_enq(&pkts[sent], left, ¶m); - if (sa == ODP_IPSEC_SA_INVALID) { - ODPH_ERR("Error creating SA handle for SA %u\n", spi); - return; + if (odp_unlikely(ret <= 0)) + break; + + sent += ret; } - 
config->sas[config->num_sas] = sa; - spi_to_sa_map[dir][spi] = &config->sas[config->num_sas]; - ++config->num_sas; + return sent; } -static void parse_sas(prog_config_t *config) +static inline const pktio_t *lookup_and_apply(odp_packet_t pkt, odph_table_t fwd_tbl, + uint8_t *q_idx) { - odp_ipsec_capability_t ipsec_capa; - FILE *file; - int cipher_idx, auth_idx; - uint32_t ar_ws, max_num_sa, dir, spi, icv_len; - char src_ip[16U] = { 0 }, dst_ip[16U] = { 0 }; - uint8_t cipher_key[65U] = { 0U }, cipher_key_extra[5U] = { 0U }, auth_key[65U] = { 0U }, - auth_key_extra[5U] = { 0U }; - - if (config->sa_conf_file == NULL) - return; - - if (odp_ipsec_capability(&ipsec_capa) < 0) { - ODPH_ERR("Error querying IPsec capabilities\n"); - return; - } + const uint32_t l3_off = odp_packet_l3_offset(pkt); + odph_ipv4hdr_t ipv4; + uint32_t dst_ip, src_ip; + fwd_entry_t *fwd; + odph_ethhdr_t eth; - if (!setup_ipsec(config)) - return; + if (odp_packet_copy_to_mem(pkt, l3_off, ODPH_IPV4HDR_LEN, &ipv4) < 0) + return NULL; - if (!create_sa_dest_queues(&ipsec_capa, config)) - return; + dst_ip = odp_be_to_cpu_32(ipv4.dst_addr); - file = fopen(config->sa_conf_file, "r"); + if (odph_iplookup_table_get_value(fwd_tbl, &dst_ip, &fwd, 0U) < 0 || fwd == NULL) + return NULL; - if (file == NULL) { - ODPH_ERR("Error opening SA configuration file: %s\n", strerror(errno)); - return; + if (l3_off != ODPH_ETHHDR_LEN) { + if (l3_off > ODPH_ETHHDR_LEN) { + if (odp_packet_pull_head(pkt, l3_off - ODPH_ETHHDR_LEN) == NULL) + return NULL; + } else { + if (odp_packet_push_head(pkt, ODPH_ETHHDR_LEN - l3_off) == NULL) + return NULL; + } } - ar_ws = MIN(32U, ipsec_capa.max_antireplay_ws); - max_num_sa = MIN(MAX_SAS, ipsec_capa.max_num_sa); - - while (fscanf(file, "%u%u%s%s%d%s%s%d%s%s%u", &dir, &spi, src_ip, dst_ip, - &cipher_idx, cipher_key, cipher_key_extra, &auth_idx, auth_key, - auth_key_extra, &icv_len) == 11) - create_sa_entry(!!dir, spi, src_ip, dst_ip, cipher_idx, cipher_key, - cipher_key_extra, auth_idx, 
auth_key, auth_key_extra, icv_len, - ar_ws, max_num_sa, config); + eth.dst = fwd->dst_mac; + eth.src = fwd->pktio->src_mac; + eth.type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4); - (void)fclose(file); -} + if (odp_packet_copy_from_mem(pkt, 0U, ODPH_ETHHDR_LEN, ð) < 0) + return NULL; -static const pktio_t *get_pktio(const char *iface, const prog_config_t *config) -{ - for (uint32_t i = 0U; i < config->num_ifs; ++i) { - if (strcmp(iface, config->pktios[i].name) == 0) - return &config->pktios[i]; + if (q_idx != NULL) { + src_ip = odp_be_to_cpu_32(ipv4.src_addr); + *q_idx = (src_ip ^ dst_ip) % fwd->pktio->num_tx_qs; } - return NULL; + return fwd->pktio; } -static void create_fwd_table_entry(const char *dst_ip_str, const char *iface, - const char *dst_mac_str, uint8_t mask, prog_config_t *config) +static inline uint32_t forward_packets(odp_packet_t pkts[], int num, odph_table_t fwd_tbl) { - fwd_entry_t *entry; - odph_ethaddr_t dst_mac; - uint32_t dst_ip; - odph_iplookup_prefix_t prefix; + odp_packet_t pkt; + odp_bool_t is_hashed_tx = ifs.is_hashed_tx; + uint8_t q_idx = is_hashed_tx ? 0U : ifs.q_idx, qs_done; + uint8_t *q_idx_ptr = is_hashed_tx ? 
&q_idx : NULL; + const pktio_t *pktio; + pkt_out_t *out; + pkt_vec_t *vec; + uint32_t num_procd = 0U, ret; - if (config->num_fwds == MAX_FWDS) { - ODPH_ERR("Maximum number of forwarding entries parsed (%u), ignoring rest\n", - MAX_FWDS); - return; - } + for (int i = 0; i < num; ++i) { + pkt = pkts[i]; + pktio = lookup_and_apply(pkt, fwd_tbl, q_idx_ptr); - entry = &config->fwd_entries[config->num_fwds]; + if (pktio == NULL) { + odp_packet_free(pkt); + continue; + } - if (odph_eth_addr_parse(&dst_mac, dst_mac_str) < 0 || - odph_ipv4_addr_parse(&dst_ip, dst_ip_str) < 0) { - ODPH_ERR("Error parsing MAC and IP addresses for forwarding entry\n"); - return; - } + out = &ifs.ifs[pktio->idx]; + vec = &out->vecs[q_idx]; - entry->pktio = get_pktio(iface, config); + if (vec->num == 0U) + out->num_qs++; + + vec->pkts[vec->num++] = pkt; + vec->pktio = pktio; + } + + for (uint32_t i = 0U; i < MAX_IFS; ++i) { + qs_done = 0U; + out = &ifs.ifs[i]; + + for (uint32_t j = 0U; j < MAX_QUEUES && qs_done < out->num_qs; ++j) { + if (out->vecs[j].num == 0U) + continue; + + vec = &out->vecs[j]; + pktio = vec->pktio; + ret = pktio->send_fn(pktio, j, vec->pkts, vec->num); + + if (odp_unlikely(ret < vec->num)) + odp_packet_free_multi(&vec->pkts[ret], vec->num - ret); + + ++qs_done; + vec->num = 0U; + num_procd += ret; + } + + out->num_qs = 0U; + } + + return num_procd; +} + +static inline void process_packets_out_enq(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, + stats_t *stats) +{ + odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_fwd[MAX_BURST]; + odp_ipsec_sa_t *sa, sas[MAX_BURST]; + int num_pkts_ips = 0, num_pkts_fwd = 0, num_procd; + + for (int i = 0; i < num; ++i) { + pkt = pkts[i]; + sa = get_out_sa(pkt); + + if (sa != NULL) { + sas[num_pkts_ips] = *sa; + pkts_ips[num_pkts_ips] = pkt; + ++num_pkts_ips; + } else { + pkts_fwd[num_pkts_fwd++] = pkt; + } + } + + if (num_pkts_ips > 0) { + num_procd = process_ipsec_out_enq(pkts_ips, sas, num_pkts_ips); + + if (odp_unlikely(num_procd < 
num_pkts_ips)) { + stats->ipsec_out_errs += num_pkts_ips - num_procd; + odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd); + } + } + + if (num_pkts_fwd > 0) { + num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl); + stats->discards += num_pkts_fwd - num_procd; + stats->fwd_pkts += num_procd; + } +} + +static void process_packets_in_enq(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, + stats_t *stats) +{ + odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_out[MAX_BURST]; + odp_ipsec_sa_t *sa, sas[MAX_BURST]; + int num_pkts_ips = 0, num_pkts_out = 0, num_procd; + + for (int i = 0; i < num; ++i) { + pkt = pkts[i]; + + if (odp_unlikely(odp_packet_has_error(pkt))) { + ++stats->discards; + odp_packet_free(pkt); + continue; + } + + sa = get_in_sa(pkt); + + if (sa != NULL) { + sas[num_pkts_ips] = *sa; + pkts_ips[num_pkts_ips] = pkt; + ++num_pkts_ips; + } else { + pkts_out[num_pkts_out++] = pkt; + } + } + + if (num_pkts_ips > 0) { + num_procd = process_ipsec_in_enq(pkts_ips, sas, num_pkts_ips); + + if (odp_unlikely(num_procd < num_pkts_ips)) { + stats->ipsec_in_errs += num_pkts_ips - num_procd; + odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd); + } + } + + if (num_pkts_out > 0) + process_packets_out_enq(pkts_out, num_pkts_out, fwd_tbl, stats); +} + +static inline odp_bool_t is_ipsec_in(odp_packet_t pkt) +{ + return odp_packet_user_ptr(pkt) == NULL; +} + +static void complete_ipsec_ops(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, stats_t *stats) +{ + odp_packet_t pkt, pkts_out[MAX_BURST], pkts_fwd[MAX_BURST]; + odp_bool_t is_in; + odp_ipsec_packet_result_t result; + int num_pkts_out = 0, num_pkts_fwd = 0, num_procd; + + for (int i = 0; i < num; ++i) { + pkt = pkts[i]; + is_in = is_ipsec_in(pkt); + + if (odp_unlikely(odp_ipsec_result(&result, pkt) < 0)) { + is_in ? ++stats->ipsec_in_errs : ++stats->ipsec_out_errs; + odp_packet_free(pkt); + continue; + } + + if (odp_unlikely(result.status.all != ODP_IPSEC_OK)) { + is_in ? 
++stats->ipsec_in_errs : ++stats->ipsec_out_errs; + odp_packet_free(pkt); + continue; + } + + if (is_in) { + ++stats->ipsec_in_pkts; + pkts_out[num_pkts_out++] = pkt; + } else { + ++stats->ipsec_out_pkts; + pkts_fwd[num_pkts_fwd++] = pkt; + } + } + + if (num_pkts_out > 0) + process_packets_out_enq(pkts_out, num_pkts_out, fwd_tbl, stats); + + if (num_pkts_fwd > 0) { + num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl); + stats->discards += num_pkts_fwd - num_procd; + stats->fwd_pkts += num_procd; + } +} + +static void drain_scheduler(prog_config_t *config ODP_UNUSED) +{ + odp_event_t ev; + + while (true) { + ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT); + + if (ev == ODP_EVENT_INVALID) + break; + + odp_event_free(ev); + } +} + +static inline int process_ipsec_in(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num, + odp_packet_t pkts_out[]) +{ + odp_ipsec_in_param_t param; + int left, sent = 0, num_out, ret; + + memset(¶m, 0, sizeof(param)); + + while (sent < num) { + left = num - sent; + num_out = left; + param.num_sa = left; + param.sa = &sas[sent]; + ret = odp_ipsec_in(&pkts[sent], left, &pkts_out[sent], &num_out, ¶m); + + if (odp_unlikely(ret <= 0)) + break; + + sent += ret; + } + + return sent; +} + +static inline int process_ipsec_out(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num, + odp_packet_t pkts_out[]) +{ + odp_ipsec_out_param_t param; + int left, sent = 0, num_out, ret; + + memset(¶m, 0, sizeof(param)); + + while (sent < num) { + left = num - sent; + num_out = left; + param.num_sa = left; + param.sa = &sas[sent]; + ret = odp_ipsec_out(&pkts[sent], left, &pkts_out[sent], &num_out, ¶m); + + if (odp_unlikely(ret <= 0)) + break; + + sent += ret; + } + + return sent; +} + +static inline void process_packets_out(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, + stats_t *stats) +{ + odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_fwd[MAX_BURST], pkts_ips_out[MAX_BURST]; + odp_ipsec_sa_t *sa, sas[MAX_BURST]; + int num_pkts_ips = 0, 
num_pkts_fwd = 0, num_procd; + odp_ipsec_packet_result_t result; + + for (int i = 0; i < num; ++i) { + pkt = pkts[i]; + sa = get_out_sa(pkt); + + if (sa != NULL) { + sas[num_pkts_ips] = *sa; + pkts_ips[num_pkts_ips] = pkt; + ++num_pkts_ips; + } else { + pkts_fwd[num_pkts_fwd++] = pkt; + } + } + + if (num_pkts_ips > 0) { + num_procd = process_ipsec_out(pkts_ips, sas, num_pkts_ips, pkts_ips_out); + + if (odp_unlikely(num_procd < num_pkts_ips)) { + stats->ipsec_out_errs += num_pkts_ips - num_procd; + odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd); + } + + for (int i = 0; i < num_procd; ++i) { + pkt = pkts_ips_out[i]; + + if (odp_unlikely(odp_ipsec_result(&result, pkt) < 0)) { + ++stats->ipsec_out_errs; + odp_packet_free(pkt); + continue; + } + + if (odp_unlikely(result.status.all != ODP_IPSEC_OK)) { + ++stats->ipsec_out_errs; + odp_packet_free(pkt); + continue; + } + + ++stats->ipsec_out_pkts; + pkts_fwd[num_pkts_fwd++] = pkt; + } + } + + if (num_pkts_fwd > 0) { + num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl); + stats->discards += num_pkts_fwd - num_procd; + stats->fwd_pkts += num_procd; + } +} + +static void process_packets_in(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, stats_t *stats) +{ + odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_out[MAX_BURST], pkts_ips_out[MAX_BURST]; + odp_ipsec_sa_t *sa, sas[MAX_BURST]; + int num_pkts_ips = 0, num_pkts_out = 0, num_procd; + odp_ipsec_packet_result_t result; + + for (int i = 0; i < num; ++i) { + pkt = pkts[i]; + + if (odp_unlikely(odp_packet_has_error(pkt))) { + ++stats->discards; + odp_packet_free(pkt); + continue; + } + + sa = get_in_sa(pkt); + + if (sa != NULL) { + sas[num_pkts_ips] = *sa; + pkts_ips[num_pkts_ips] = pkt; + ++num_pkts_ips; + } else { + pkts_out[num_pkts_out++] = pkt; + } + } + + if (num_pkts_ips > 0) { + num_procd = process_ipsec_in(pkts_ips, sas, num_pkts_ips, pkts_ips_out); + + if (odp_unlikely(num_procd < num_pkts_ips)) { + stats->ipsec_in_errs += num_pkts_ips 
- num_procd; + odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd); + } + + for (int i = 0; i < num_procd; ++i) { + pkt = pkts_ips_out[i]; + + if (odp_unlikely(odp_ipsec_result(&result, pkt) < 0)) { + ++stats->ipsec_in_errs; + odp_packet_free(pkt); + continue; + } + + if (odp_unlikely(result.status.all != ODP_IPSEC_OK)) { + ++stats->ipsec_in_errs; + odp_packet_free(pkt); + continue; + } + + ++stats->ipsec_in_pkts; + pkts_out[num_pkts_out++] = pkt; + } + } + + if (num_pkts_out > 0) + process_packets_out(pkts_out, num_pkts_out, fwd_tbl, stats); +} + +static void drain_direct_inputs(prog_config_t *config) +{ + odp_packet_t pkt; + + for (uint32_t i = 0U; i < config->num_ifs; ++i) { + for (uint32_t j = 0U; j < config->num_input_qs; ++j) { + while (odp_pktin_recv(config->pktios[i].in_dir_qs[j], &pkt, 1) == 1) + odp_packet_free(pkt); + } + } +} + +static odp_bool_t setup_ipsec(prog_config_t *config) +{ + odp_queue_param_t q_param; + odp_ipsec_config_t ipsec_config; + char q_name[ODP_QUEUE_NAME_LEN]; + + if (!config->is_dir_rx) { + snprintf(q_name, sizeof(q_name), SHORT_PROG_NAME "_sa_status"); + odp_queue_param_init(&q_param); + q_param.type = ODP_QUEUE_TYPE_SCHED; + q_param.sched.prio = odp_schedule_default_prio(); + q_param.sched.sync = ODP_SCHED_SYNC_PARALLEL; + q_param.sched.group = ODP_SCHED_GROUP_ALL; + config->compl_q = odp_queue_create(q_name, &q_param); + + if (config->compl_q == ODP_QUEUE_INVALID) { + ODPH_ERR("Error creating IPsec completion queue\n"); + return false; + } + } + + odp_ipsec_config_init(&ipsec_config); + + if (!config->is_dir_rx) { + ipsec_config.inbound_mode = ODP_IPSEC_OP_MODE_ASYNC; + ipsec_config.outbound_mode = ODP_IPSEC_OP_MODE_ASYNC; + config->ops.proc = process_packets_in_enq; + config->ops.compl = complete_ipsec_ops; + config->ops.drain = drain_scheduler; + } else { + ipsec_config.inbound_mode = ODP_IPSEC_OP_MODE_SYNC; + ipsec_config.outbound_mode = ODP_IPSEC_OP_MODE_SYNC; + config->ops.proc = process_packets_in; + 
config->ops.compl = NULL; + config->ops.drain = drain_direct_inputs; + } + + ipsec_config.inbound.default_queue = config->compl_q; + /* For tunnel to tunnel, we need to parse up to this to check the UDP port for SA. */ + ipsec_config.inbound.parse_level = ODP_PROTO_LAYER_L4; + + if (odp_ipsec_config(&ipsec_config) < 0) { + ODPH_ERR("Error configuring IPsec\n"); + return false; + } + + return true; +} + +static odp_bool_t create_sa_dest_queues(odp_ipsec_capability_t *ipsec_capa, + prog_config_t *config) +{ + odp_queue_param_t q_param; + const uint32_t max_sa_qs = MIN(MAX_SA_QUEUES, ipsec_capa->max_queues); + + if (config->num_sa_qs == 0U || config->num_sa_qs > max_sa_qs) { + ODPH_ERR("Invalid number of SA queues: %u (min: 1, max: %u)\n", config->num_sa_qs, + max_sa_qs); + config->num_sa_qs = 0U; + return false; + } + + for (uint32_t i = 0U; i < config->num_sa_qs; ++i) { + char q_name[ODP_QUEUE_NAME_LEN]; + + snprintf(q_name, sizeof(q_name), SHORT_PROG_NAME "_sa_compl_%u", i); + odp_queue_param_init(&q_param); + q_param.type = ODP_QUEUE_TYPE_SCHED; + q_param.sched.prio = odp_schedule_max_prio(); + q_param.sched.sync = config->mode == ORDERED ? 
ODP_SCHED_SYNC_ORDERED : + ODP_SCHED_SYNC_PARALLEL; + q_param.sched.group = ODP_SCHED_GROUP_ALL; + config->sa_qs[i] = odp_queue_create(q_name, &q_param); + + if (config->sa_qs[i] == ODP_QUEUE_INVALID) { + ODPH_ERR("Error creating SA destination queue (created count: %u)\n", i); + config->num_sa_qs = i; + return false; + } + } + + return true; +} + +static void create_sa_entry(uint32_t dir, uint32_t spi, const char *src_ip_str, + const char *dst_ip_str, int cipher_idx, uint8_t *cipher_key, + uint8_t *cipher_key_extra, int auth_idx, uint8_t *auth_key, + uint8_t *auth_key_extra, uint32_t icv_len, uint32_t ar_ws, + uint32_t max_num_sa, prog_config_t *config) +{ + uint32_t src_ip, dst_ip; + odp_ipsec_sa_param_t sa_param; + odp_ipsec_crypto_param_t crypto_param; + odp_ipsec_sa_t sa; + + if (config->num_sas == max_num_sa) { + ODPH_ERR("Maximum number of SAs parsed (%u), ignoring rest\n", max_num_sa); + return; + } + + if (odph_ipv4_addr_parse(&src_ip, src_ip_str) < 0 || + odph_ipv4_addr_parse(&dst_ip, dst_ip_str) < 0) { + ODPH_ERR("Error parsing IP addresses for SA %u\n", spi); + return; + } + + if (spi > UINT16_MAX) { + ODPH_ERR("Unsupported SPI value for SA %u (> %u)\n", spi, UINT16_MAX); + return; + } + + if (spi_to_sa_map[dir][spi] != NULL) { + ODPH_ERR("Non-unique SPIs not supported for SA %u\n", spi); + return; + } + + src_ip = odp_cpu_to_be_32(src_ip); + dst_ip = odp_cpu_to_be_32(dst_ip); + odp_ipsec_sa_param_init(&sa_param); + sa_param.proto = ODP_IPSEC_ESP; + sa_param.mode = ODP_IPSEC_MODE_TUNNEL; + sa_param.spi = spi; + sa_param.dest_queue = config->sa_qs[config->num_sas % config->num_sa_qs]; + + if (dir > 0U) { + sa_param.dir = ODP_IPSEC_DIR_OUTBOUND; + sa_param.outbound.tunnel.ipv4.src_addr = &src_ip; + sa_param.outbound.tunnel.ipv4.dst_addr = &dst_ip; + } else { + sa_param.dir = ODP_IPSEC_DIR_INBOUND; + sa_param.inbound.lookup_mode = ODP_IPSEC_LOOKUP_DISABLED; + sa_param.inbound.antireplay_ws = ar_ws; + } + + crypto_param.cipher_alg = cipher_idx; + 
crypto_param.cipher_key.data = cipher_key; + crypto_param.cipher_key.length = strlen((const char *)cipher_key); + crypto_param.cipher_key_extra.data = cipher_key_extra; + crypto_param.cipher_key_extra.length = strlen((const char *)cipher_key_extra); + crypto_param.auth_alg = auth_idx; + crypto_param.auth_key.data = auth_key; + crypto_param.auth_key.length = strlen((const char *)auth_key); + crypto_param.auth_key_extra.data = auth_key_extra; + crypto_param.auth_key_extra.length = strlen((const char *)auth_key_extra); + crypto_param.icv_len = icv_len; + sa_param.crypto = crypto_param; + sa = odp_ipsec_sa_create(&sa_param); + + if (sa == ODP_IPSEC_SA_INVALID) { + ODPH_ERR("Error creating SA handle for SA %u\n", spi); + return; + } + + config->sas[config->num_sas] = sa; + spi_to_sa_map[dir][spi] = &config->sas[config->num_sas]; + ++config->num_sas; +} + +static void parse_sas(prog_config_t *config) +{ + odp_ipsec_capability_t ipsec_capa; + FILE *file; + int cipher_idx, auth_idx; + uint32_t ar_ws, max_num_sa, dir, spi, icv_len; + char src_ip[16U] = { 0 }, dst_ip[16U] = { 0 }; + uint8_t cipher_key[65U] = { 0U }, cipher_key_extra[5U] = { 0U }, auth_key[65U] = { 0U }, + auth_key_extra[5U] = { 0U }; + + if (config->sa_conf_file == NULL) + return; + + if (odp_ipsec_capability(&ipsec_capa) < 0) { + ODPH_ERR("Error querying IPsec capabilities\n"); + return; + } + + if (!setup_ipsec(config)) + return; + + if (!config->is_dir_rx && !create_sa_dest_queues(&ipsec_capa, config)) + return; + + file = fopen(config->sa_conf_file, "r"); + + if (file == NULL) { + ODPH_ERR("Error opening SA configuration file: %s\n", strerror(errno)); + return; + } + + ar_ws = MIN(32U, ipsec_capa.max_antireplay_ws); + max_num_sa = MIN(MAX_SAS, ipsec_capa.max_num_sa); + + while (fscanf(file, "%u%u%s%s%d%s%s%d%s%s%u", &dir, &spi, src_ip, dst_ip, + &cipher_idx, cipher_key, cipher_key_extra, &auth_idx, auth_key, + auth_key_extra, &icv_len) == 11) + create_sa_entry(!!dir, spi, src_ip, dst_ip, cipher_idx, 
cipher_key, + cipher_key_extra, auth_idx, auth_key, auth_key_extra, icv_len, + ar_ws, max_num_sa, config); + + (void)fclose(file); +} + +static const pktio_t *get_pktio(const char *iface, const prog_config_t *config) +{ + for (uint32_t i = 0U; i < config->num_ifs; ++i) { + if (strcmp(iface, config->pktios[i].name) == 0) + return &config->pktios[i]; + } + + return NULL; +} + +static void create_fwd_table_entry(const char *dst_ip_str, const char *iface, + const char *dst_mac_str, uint8_t mask, prog_config_t *config) +{ + fwd_entry_t *entry; + odph_ethaddr_t dst_mac; + uint32_t dst_ip; + odph_iplookup_prefix_t prefix; + + if (config->num_fwds == MAX_FWDS) { + ODPH_ERR("Maximum number of forwarding entries parsed (%u), ignoring rest\n", + MAX_FWDS); + return; + } + + entry = &config->fwd_entries[config->num_fwds]; + + if (odph_eth_addr_parse(&dst_mac, dst_mac_str) < 0 || + odph_ipv4_addr_parse(&dst_ip, dst_ip_str) < 0) { + ODPH_ERR("Error parsing MAC and IP addresses for forwarding entry\n"); + return; + } + + entry->pktio = get_pktio(iface, config); if (entry->pktio == NULL) { ODPH_ERR("Invalid interface in forwarding entry: %s\n", iface); @@ -640,6 +1187,11 @@ static parse_result_t check_options(prog_config_t *config) return PRS_NOK; } + if (config->is_dir_rx) { + config->num_input_qs = config->num_thrs; + config->num_output_qs = config->num_thrs; + } + return PRS_OK; } @@ -658,11 +1210,12 @@ static parse_result_t parse_options(int argc, char **argv, prog_config_t *config { "num_input_qs", required_argument, NULL, 'I' }, { "num_sa_qs", required_argument, NULL, 'S' }, { "num_output_qs", required_argument, NULL, 'O' }, + { "direct_rx", no_argument, NULL, 'd'}, { "help", no_argument, NULL, 'h' }, { NULL, 0, NULL, 0 } }; - static const char *shortopts = "i:n:l:c:m:s:f:I:S:O:h"; + static const char *shortopts = "i:n:l:c:m:s:f:I:S:O:dh"; while (true) { opt = getopt_long(argc, argv, shortopts, longopts, &long_index); @@ -701,6 +1254,9 @@ static parse_result_t 
parse_options(int argc, char **argv, prog_config_t *config case 'O': config->num_output_qs = atoi(optarg); break; + case 'd': + config->is_dir_rx = true; + break; case 'h': print_usage(); return PRS_TERM; @@ -732,436 +1288,214 @@ static parse_result_t setup_program(int argc, char **argv, prog_config_t *config return parse_options(argc, argv, config); } -static odp_bool_t send(const pktio_t *pktio, uint8_t index, odp_packet_t pkt) -{ - return odp_pktout_send(pktio->out_dir_qs[index], &pkt, 1) == 1; -} - -static odp_bool_t enqueue(const pktio_t *pktio, uint8_t index, odp_packet_t pkt) -{ - return odp_queue_enq(pktio->out_ev_qs[index], odp_packet_to_event(pkt)) == 0; -} - -static odp_bool_t setup_pktios(prog_config_t *config) -{ - odp_pool_param_t pool_param; - pktio_t *pktio; - odp_pktio_param_t pktio_param; - odp_pktin_queue_param_t pktin_param; - odp_pktio_capability_t capa; - odp_pktout_queue_param_t pktout_param; - odp_pktio_config_t pktio_config; - uint32_t max_output_qs; - - odp_pool_param_init(&pool_param); - pool_param.pkt.seg_len = config->pkt_len; - pool_param.pkt.len = config->pkt_len; - pool_param.pkt.num = config->num_pkts; - pool_param.type = ODP_POOL_PACKET; - config->pktio_pool = odp_pool_create(PROG_NAME, &pool_param); - - if (config->pktio_pool == ODP_POOL_INVALID) { - ODPH_ERR("Error creating packet I/O pool\n"); - return false; - } - - for (uint32_t i = 0U; i < config->num_ifs; ++i) { - pktio = &config->pktios[i]; - odp_pktio_param_init(&pktio_param); - pktio_param.in_mode = ODP_PKTIN_MODE_SCHED; - pktio_param.out_mode = config->mode == ORDERED ? 
ODP_PKTOUT_MODE_QUEUE : - ODP_PKTOUT_MODE_DIRECT; - pktio->handle = odp_pktio_open(pktio->name, config->pktio_pool, &pktio_param); - - if (pktio->handle == ODP_PKTIO_INVALID) { - ODPH_ERR("Error opening packet I/O (%s)\n", pktio->name); - return false; - } - - if (odp_pktio_capability(pktio->handle, &capa) < 0) { - ODPH_ERR("Error querying packet I/O capabilities (%s)\n", pktio->name); - return false; - } - - if (config->num_input_qs == 0U || config->num_input_qs > capa.max_input_queues) { - ODPH_ERR("Invalid number of input queues for packet I/O: %u (min: 1, max: " - "%u) (%s)\n", config->num_input_qs, capa.max_input_queues, - pktio->name); - return false; - } - - max_output_qs = MIN(MAX_QUEUES, capa.max_output_queues); - - if (config->num_output_qs == 0U || config->num_output_qs > max_output_qs) { - ODPH_ERR("Invalid number of output queues for packet I/O: %u (min: 1, " - "max: %u) (%s)\n", config->num_output_qs, max_output_qs, - pktio->name); - return false; - } - - odp_pktin_queue_param_init(&pktin_param); - - if (config->mode == ORDERED) - pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED; - - if (config->num_input_qs > 1U) { - pktin_param.hash_enable = true; - pktin_param.hash_proto.proto.ipv4_udp = 1U; - pktin_param.num_queues = config->num_input_qs; - } - - if (odp_pktin_queue_config(pktio->handle, &pktin_param) < 0) { - ODPH_ERR("Error configuring packet I/O input queues (%s)\n", pktio->name); - return false; - } - - pktio->send_fn = config->mode == ORDERED ? enqueue : send; - pktio->num_tx_qs = config->num_output_qs; - odp_pktout_queue_param_init(&pktout_param); - pktout_param.num_queues = pktio->num_tx_qs; - pktout_param.op_mode = config->num_thrs > (int)pktio->num_tx_qs ? 
- ODP_PKTIO_OP_MT : ODP_PKTIO_OP_MT_UNSAFE; - - if (odp_pktout_queue_config(pktio->handle, &pktout_param) < 0) { - ODPH_ERR("Error configuring packet I/O output queues (%s)\n", pktio->name); - return false; - } - - if (config->mode == ORDERED) { - if (odp_pktout_event_queue(pktio->handle, pktio->out_ev_qs, - pktio->num_tx_qs) != (int)pktio->num_tx_qs) { - ODPH_ERR("Error querying packet I/O output event queue (%s)\n", - pktio->name); - return false; - } - } else { - if (odp_pktout_queue(pktio->handle, pktio->out_dir_qs, pktio->num_tx_qs) - != (int)pktio->num_tx_qs) { - ODPH_ERR("Error querying packet I/O output queue (%s)\n", - pktio->name); - return false; - } - } - - odp_pktio_config_init(&pktio_config); - - if (odp_pktio_config(pktio->handle, &pktio_config) < 0) { - ODPH_ERR("Error configuring packet I/O extra options (%s)\n", pktio->name); - return false; - } - - if (odp_pktio_mac_addr(pktio->handle, &pktio->src_mac, sizeof(pktio->src_mac)) - != sizeof(pktio->src_mac)) { - ODPH_ERR("Error getting packet I/O MAC address (%s)\n", pktio->name); - return false; - } - - if (odp_pktio_start(pktio->handle) < 0) { - ODPH_ERR("Error starting packet I/O (%s)\n", pktio->name); - return false; - } - } - - return true; -} - -static odp_bool_t setup_fwd_table(prog_config_t *config) -{ - fwd_entry_t *fwd_e; - - config->fwd_tbl = odph_iplookup_table_create(SHORT_PROG_NAME "_fwd_tbl", 0U, 0U, - sizeof(fwd_entry_t *)); - - if (config->fwd_tbl == NULL) { - ODPH_ERR("Error creating forwarding table\n"); - return false; - } - - for (uint32_t i = 0U; i < config->num_fwds; ++i) { - fwd_e = &config->fwd_entries[i]; - - if (odph_iplookup_table_put_value(config->fwd_tbl, &fwd_e->prefix, &fwd_e) < 0) { - ODPH_ERR("Error populating forwarding table\n"); - return false; - } - } - - return true; -} - -static inline odp_ipsec_sa_t *get_in_sa(odp_packet_t pkt) -{ - odph_esphdr_t esp; - uint32_t spi; - - if (!odp_packet_has_ipsec(pkt)) - return NULL; - - if (odp_packet_copy_to_mem(pkt, 
odp_packet_l4_offset(pkt), ODPH_ESPHDR_LEN, &esp) < 0) - return NULL; - - spi = odp_be_to_cpu_32(esp.spi); - - return spi <= UINT16_MAX ? spi_to_sa_map[DIR_IN][spi] : NULL; -} - -static inline int process_ipsec_in(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num) -{ - odp_ipsec_in_param_t param; - int left, sent = 0, ret; - - memset(¶m, 0, sizeof(param)); - /* IPsec in/out need to be identified somehow, so use user_ptr for this. */ - for (int i = 0; i < num; ++i) - odp_packet_user_ptr_set(pkts[i], NULL); - - while (sent < num) { - left = num - sent; - param.num_sa = left; - param.sa = &sas[sent]; - ret = odp_ipsec_in_enq(&pkts[sent], left, ¶m); - - if (odp_unlikely(ret <= 0)) - break; - - sent += ret; - } - - return sent; -} - -static inline odp_ipsec_sa_t *get_out_sa(odp_packet_t pkt) -{ - odph_udphdr_t udp; - uint16_t dst_port; - - if (!odp_packet_has_udp(pkt)) - return NULL; - - if (odp_packet_copy_to_mem(pkt, odp_packet_l4_offset(pkt), ODPH_UDPHDR_LEN, &udp) < 0) - return NULL; - - dst_port = odp_be_to_cpu_16(udp.dst_port); - - return dst_port ? spi_to_sa_map[DIR_OUT][dst_port] : NULL; -} - -static inline int process_ipsec_out(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num) -{ - odp_ipsec_out_param_t param; - int left, sent = 0, ret; - - memset(¶m, 0, sizeof(param)); - /* IPsec in/out need to be identified somehow, so use user_ptr for this. 
*/ - for (int i = 0; i < num; ++i) - odp_packet_user_ptr_set(pkts[i], &ipsec_out_mark); - - while (sent < num) { - left = num - sent; - param.num_sa = left; - param.sa = &sas[sent]; - ret = odp_ipsec_out_enq(&pkts[sent], left, ¶m); - - if (odp_unlikely(ret <= 0)) - break; - - sent += ret; - } - - return sent; +static uint32_t schedule(thread_config_t *config ODP_UNUSED, odp_event_t evs[], int num) +{ + return odp_schedule_multi_no_wait(NULL, evs, num); } -static inline const pktio_t *lookup_and_apply(odp_packet_t pkt, odph_table_t fwd_tbl, - uint8_t *hash) +static uint32_t recv(thread_config_t *config, odp_event_t evs[], int num) { - const uint32_t l3_off = odp_packet_l3_offset(pkt); - odph_ipv4hdr_t ipv4; - uint32_t dst_ip, src_ip; - fwd_entry_t *fwd; - odph_ethhdr_t eth; + prog_config_t *prog_config = config->prog_config; + pktio_t *pktio = &prog_config->pktios[config->pktio++ % prog_config->num_ifs]; + odp_pktin_queue_t in_q = pktio->in_dir_qs[config->thr_idx % prog_config->num_input_qs]; + odp_packet_t pkts[num]; + int ret; - if (odp_packet_copy_to_mem(pkt, l3_off, ODPH_IPV4HDR_LEN, &ipv4) < 0) - return NULL; + ret = odp_pktin_recv(in_q, pkts, num); - dst_ip = odp_be_to_cpu_32(ipv4.dst_addr); + if (odp_unlikely(ret <= 0)) + return 0U; - if (odph_iplookup_table_get_value(fwd_tbl, &dst_ip, &fwd, 0U) < 0 || fwd == NULL) - return NULL; + odp_packet_to_event_multi(pkts, evs, ret); - if (l3_off != ODPH_ETHHDR_LEN) { - if (l3_off > ODPH_ETHHDR_LEN) { - if (odp_packet_pull_head(pkt, l3_off - ODPH_ETHHDR_LEN) == NULL) - return NULL; - } else { - if (odp_packet_push_head(pkt, ODPH_ETHHDR_LEN - l3_off) == NULL) - return NULL; - } - } + return ret; +} - eth.dst = fwd->dst_mac; - eth.src = fwd->pktio->src_mac; - eth.type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4); +static uint32_t send(const pktio_t *pktio, uint8_t index, odp_packet_t pkts[], int num) +{ + int ret = odp_pktout_send(pktio->out_dir_qs[index], pkts, num); - if (odp_packet_copy_from_mem(pkt, 0U, ODPH_ETHHDR_LEN, ð) 
< 0) - return NULL; + return ret < 0 ? 0U : (uint32_t)ret; +} + +static uint32_t enqueue(const pktio_t *pktio, uint8_t index, odp_packet_t pkts[], int num) +{ + odp_event_t evs[MAX_BURST]; + int ret; - src_ip = odp_be_to_cpu_32(ipv4.src_addr); - *hash = src_ip ^ dst_ip; + odp_packet_to_event_multi(pkts, evs, num); - return fwd->pktio; + ret = odp_queue_enq_multi(pktio->out_ev_qs[index], evs, num); + + return ret < 0 ? 0U : (uint32_t)ret; } -static inline uint32_t forward_packets(odp_packet_t pkts[], int num, odph_table_t fwd_tbl) +static odp_bool_t setup_pktios(prog_config_t *config) { - odp_packet_t pkt; - uint8_t hash = 0U; - const pktio_t *pktio; - uint32_t num_procd = 0U; + odp_pool_param_t pool_param; + pktio_t *pktio; + odp_pktio_param_t pktio_param; + odp_pktin_queue_param_t pktin_param; + odp_pktio_capability_t capa; + odp_pktout_queue_param_t pktout_param; + odp_pktio_config_t pktio_config; + uint32_t max_output_qs; - for (int i = 0; i < num; ++i) { - pkt = pkts[i]; - pktio = lookup_and_apply(pkt, fwd_tbl, &hash); + odp_pool_param_init(&pool_param); + pool_param.pkt.seg_len = config->pkt_len; + pool_param.pkt.len = config->pkt_len; + pool_param.pkt.num = config->num_pkts; + pool_param.type = ODP_POOL_PACKET; + config->pktio_pool = odp_pool_create(PROG_NAME, &pool_param); - if (pktio == NULL) { - odp_packet_free(pkt); - continue; + if (config->pktio_pool == ODP_POOL_INVALID) { + ODPH_ERR("Error creating packet I/O pool\n"); + return false; + } + + config->ops.rx = !config->is_dir_rx ? schedule : recv; + config->is_hashed_tx = !config->is_dir_rx && config->mode == ORDERED; + + for (uint32_t i = 0U; i < config->num_ifs; ++i) { + pktio = &config->pktios[i]; + pktio->idx = i; + odp_pktio_param_init(&pktio_param); + pktio_param.in_mode = !config->is_dir_rx ? + ODP_PKTIN_MODE_SCHED : ODP_PKTIN_MODE_DIRECT; + pktio_param.out_mode = config->is_hashed_tx ? 
+ ODP_PKTOUT_MODE_QUEUE : ODP_PKTOUT_MODE_DIRECT; + pktio->handle = odp_pktio_open(pktio->name, config->pktio_pool, &pktio_param); + + if (pktio->handle == ODP_PKTIO_INVALID) { + ODPH_ERR("Error opening packet I/O (%s)\n", pktio->name); + return false; } - if (odp_unlikely(!pktio->send_fn(pktio, hash % pktio->num_tx_qs, pkt))) { - odp_packet_free(pkt); - continue; + if (odp_pktio_capability(pktio->handle, &capa) < 0) { + ODPH_ERR("Error querying packet I/O capabilities (%s)\n", pktio->name); + return false; } - ++num_procd; - } + if (config->num_input_qs == 0U || config->num_input_qs > capa.max_input_queues) { + ODPH_ERR("Invalid number of input queues for packet I/O: %u (min: 1, max: " + "%u) (%s)\n", config->num_input_qs, capa.max_input_queues, + pktio->name); + return false; + } - return num_procd; -} + max_output_qs = MIN(MAX_QUEUES, capa.max_output_queues); -static inline void process_packets_out(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, - stats_t *stats) -{ - odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_fwd[MAX_BURST]; - odp_ipsec_sa_t *sa, sas[MAX_BURST]; - int num_pkts_ips = 0, num_pkts_fwd = 0, num_procd; + if (config->num_output_qs == 0U || config->num_output_qs > max_output_qs) { + ODPH_ERR("Invalid number of output queues for packet I/O: %u (min: 1, " + "max: %u) (%s)\n", config->num_output_qs, max_output_qs, + pktio->name); + return false; + } - for (int i = 0; i < num; ++i) { - pkt = pkts[i]; - sa = get_out_sa(pkt); + odp_pktin_queue_param_init(&pktin_param); - if (sa != NULL) { - sas[num_pkts_ips] = *sa; - pkts_ips[num_pkts_ips] = pkt; - ++num_pkts_ips; - } else { - pkts_fwd[num_pkts_fwd++] = pkt; + if (config->is_hashed_tx) + pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED; + + if (config->num_input_qs > 1U) { + pktin_param.hash_enable = true; + pktin_param.hash_proto.proto.ipv4_udp = 1U; + pktin_param.num_queues = config->num_input_qs; } - } - if (num_pkts_ips > 0) { - num_procd = process_ipsec_out(pkts_ips, sas, num_pkts_ips); 
+ pktin_param.op_mode = (config->is_dir_rx && + config->num_thrs > (int)config->num_input_qs) ? + ODP_PKTIO_OP_MT : ODP_PKTIO_OP_MT_UNSAFE; - if (odp_unlikely(num_procd < num_pkts_ips)) { - num_procd = num_procd < 0 ? 0 : num_procd; - stats->ipsec_out_errs += num_pkts_ips - num_procd; - odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd); + if (odp_pktin_queue_config(pktio->handle, &pktin_param) < 0) { + ODPH_ERR("Error configuring packet I/O input queues (%s)\n", pktio->name); + return false; } - } - - if (num_pkts_fwd > 0) { - num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl); - stats->discards += num_pkts_fwd - num_procd; - stats->fwd_pkts += num_procd; - } -} -static inline void process_packets_in(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, - stats_t *stats) -{ - odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_out[MAX_BURST]; - odp_ipsec_sa_t *sa, sas[MAX_BURST]; - int num_pkts_ips = 0, num_pkts_out = 0, num_procd; + if (config->is_dir_rx) { + if (odp_pktin_queue(pktio->handle, pktio->in_dir_qs, config->num_input_qs) + != (int)config->num_input_qs) { + ODPH_ERR("Error querying packet I/O input queue (%s)\n", + pktio->name); + return false; + } + } - for (int i = 0; i < num; ++i) { - pkt = pkts[i]; + pktio->send_fn = config->is_hashed_tx ? enqueue : send; + pktio->num_tx_qs = config->num_output_qs; + odp_pktout_queue_param_init(&pktout_param); + pktout_param.num_queues = pktio->num_tx_qs; - if (odp_unlikely(odp_packet_has_error(pkt))) { - ++stats->discards; - odp_packet_free(pkt); - continue; + if (!config->is_hashed_tx) { + pktout_param.op_mode = config->num_thrs > (int)pktio->num_tx_qs ? 
+ ODP_PKTIO_OP_MT : ODP_PKTIO_OP_MT_UNSAFE; } - sa = get_in_sa(pkt); + if (odp_pktout_queue_config(pktio->handle, &pktout_param) < 0) { + ODPH_ERR("Error configuring packet I/O output queues (%s)\n", pktio->name); + return false; + } - if (sa != NULL) { - sas[num_pkts_ips] = *sa; - pkts_ips[num_pkts_ips] = pkt; - ++num_pkts_ips; + if (config->is_hashed_tx) { + if (odp_pktout_event_queue(pktio->handle, pktio->out_ev_qs, + pktio->num_tx_qs) != (int)pktio->num_tx_qs) { + ODPH_ERR("Error querying packet I/O output event queue (%s)\n", + pktio->name); + return false; + } } else { - pkts_out[num_pkts_out++] = pkt; + if (odp_pktout_queue(pktio->handle, pktio->out_dir_qs, pktio->num_tx_qs) + != (int)pktio->num_tx_qs) { + ODPH_ERR("Error querying packet I/O output queue (%s)\n", + pktio->name); + return false; + } } - } - if (num_pkts_ips > 0) { - num_procd = process_ipsec_in(pkts_ips, sas, num_pkts_ips); + odp_pktio_config_init(&pktio_config); - if (odp_unlikely(num_procd < num_pkts_ips)) { - num_procd = num_procd < 0 ? 
0 : num_procd; - stats->ipsec_in_errs += num_pkts_ips - num_procd; - odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd); + if (odp_pktio_config(pktio->handle, &pktio_config) < 0) { + ODPH_ERR("Error configuring packet I/O extra options (%s)\n", pktio->name); + return false; } - } - if (num_pkts_out > 0) - process_packets_out(pkts_out, num_pkts_out, fwd_tbl, stats); -} + if (odp_pktio_mac_addr(pktio->handle, &pktio->src_mac, sizeof(pktio->src_mac)) + != sizeof(pktio->src_mac)) { + ODPH_ERR("Error getting packet I/O MAC address (%s)\n", pktio->name); + return false; + } -static inline odp_bool_t is_ipsec_in(odp_packet_t pkt) -{ - return odp_packet_user_ptr(pkt) == NULL; + if (odp_pktio_start(pktio->handle) < 0) { + ODPH_ERR("Error starting packet I/O (%s)\n", pktio->name); + return false; + } + } + + return true; } -static inline void complete_ipsec_ops(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, - stats_t *stats) +static odp_bool_t setup_fwd_table(prog_config_t *config) { - odp_packet_t pkt, pkts_out[MAX_BURST], pkts_fwd[MAX_BURST]; - odp_bool_t is_in; - odp_ipsec_packet_result_t result; - int num_pkts_out = 0, num_pkts_fwd = 0, num_procd; + fwd_entry_t *fwd_e; - for (int i = 0; i < num; ++i) { - pkt = pkts[i]; - is_in = is_ipsec_in(pkt); + config->fwd_tbl = odph_iplookup_table_create(SHORT_PROG_NAME "_fwd_tbl", 0U, 0U, + sizeof(fwd_entry_t *)); - if (odp_unlikely(odp_ipsec_result(&result, pkt) < 0)) { - is_in ? ++stats->ipsec_in_errs : ++stats->ipsec_out_errs; - odp_packet_free(pkt); - continue; - } + if (config->fwd_tbl == NULL) { + ODPH_ERR("Error creating forwarding table\n"); + return false; + } - if (odp_unlikely(result.status.all != ODP_IPSEC_OK)) { - is_in ? 
++stats->ipsec_in_errs : ++stats->ipsec_out_errs; - odp_packet_free(pkt); - continue; - } + for (uint32_t i = 0U; i < config->num_fwds; ++i) { + fwd_e = &config->fwd_entries[i]; - if (is_in) { - ++stats->ipsec_in_pkts; - pkts_out[num_pkts_out++] = pkt; - } else { - ++stats->ipsec_out_pkts; - pkts_fwd[num_pkts_fwd++] = pkt; + if (odph_iplookup_table_put_value(config->fwd_tbl, &fwd_e->prefix, &fwd_e) < 0) { + ODPH_ERR("Error populating forwarding table\n"); + return false; } } - if (num_pkts_out > 0) - process_packets_out(pkts_out, num_pkts_out, fwd_tbl, stats); - - if (num_pkts_fwd > 0) { - num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl); - stats->discards += num_pkts_fwd - num_procd; - stats->fwd_pkts += num_procd; - } + return true; } static inline void check_ipsec_status_ev(odp_event_t ev, stats_t *stats) @@ -1174,42 +1508,33 @@ static inline void check_ipsec_status_ev(odp_event_t ev, stats_t *stats) odp_event_free(ev); } -static void drain_events(void) -{ - odp_event_t ev; - - while (true) { - ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT); - - if (ev == ODP_EVENT_INVALID) - break; - - odp_event_free(ev); - } -} - static int process_packets(void *args) { thread_config_t *config = args; + int thr_idx = odp_thread_id(); odp_event_t evs[MAX_BURST], ev; - int cnt; + ops_t ops = config->prog_config->ops; + uint32_t cnt; odp_event_type_t type; odp_event_subtype_t subtype; odp_packet_t pkt, pkts_in[MAX_BURST], pkts_ips[MAX_BURST]; odph_table_t fwd_tbl = config->prog_config->fwd_tbl; stats_t *stats = &config->stats; + ifs.is_hashed_tx = config->prog_config->is_hashed_tx; + ifs.q_idx = thr_idx % config->prog_config->num_output_qs; + config->thr_idx = thr_idx; odp_barrier_wait(&config->prog_config->init_barrier); while (odp_atomic_load_u32(&is_running)) { int num_pkts_in = 0, num_pkts_ips = 0; /* TODO: Add possibility to configure scheduler and ipsec enq/deq burst sizes. 
*/ - cnt = odp_schedule_multi_no_wait(NULL, evs, MAX_BURST); + cnt = ops.rx(config, evs, MAX_BURST); - if (cnt == 0) + if (cnt == 0U) continue; - for (int i = 0; i < cnt; ++i) { + for (uint32_t i = 0U; i < cnt; ++i) { ev = evs[i]; type = odp_event_types(ev, &subtype); pkt = odp_packet_from_event(ev); @@ -1232,14 +1557,14 @@ static int process_packets(void *args) } if (num_pkts_in > 0) - process_packets_in(pkts_in, num_pkts_in, fwd_tbl, stats); + ops.proc(pkts_in, num_pkts_in, fwd_tbl, stats); - if (num_pkts_ips > 0) - complete_ipsec_ops(pkts_ips, num_pkts_ips, fwd_tbl, stats); + if (ops.compl && num_pkts_ips > 0) + ops.compl(pkts_ips, num_pkts_ips, fwd_tbl, stats); } odp_barrier_wait(&config->prog_config->term_barrier); - drain_events(); + ops.drain(config->prog_config); return 0; } @@ -1348,8 +1673,9 @@ static void teardown_test(const prog_config_t *config) for (uint32_t i = 0U; i < config->num_sas; ++i) (void)odp_ipsec_sa_disable(config->sas[i]); - /* Drain SA status events. */ - wait_sas_disabled(config->num_sas); + if (!config->is_dir_rx) + /* Drain SA status events. */ + wait_sas_disabled(config->num_sas); for (uint32_t i = 0U; i < config->num_sas; ++i) (void)odp_ipsec_sa_destroy(config->sas[i]); @@ -1405,7 +1731,7 @@ int main(int argc, char **argv) init_config(&config); - if (odp_schedule_config(NULL) < 0) { + if (!config.is_dir_rx && odp_schedule_config(NULL) < 0) { ODPH_ERR("Error configuring scheduler\n"); ret = EXIT_FAILURE; goto out_test; diff --git a/test/performance/odp_pool_perf.c b/test/performance/odp_pool_perf.c index 957b1de003..4ae2cf7d33 100644 --- a/test/performance/odp_pool_perf.c +++ b/test/performance/odp_pool_perf.c @@ -1,5 +1,5 @@ /* Copyright (c) 2018, Linaro Limited - * Copyright (c) 2019-2021, Nokia + * Copyright (c) 2019-2022, Nokia * * All rights reserved. 
* @@ -16,6 +16,13 @@ #include #include +#define STAT_AVAILABLE 0x1 +#define STAT_CACHE 0x2 +#define STAT_THR_CACHE 0x4 +#define STAT_ALLOC_OPS 0x10 +#define STAT_FREE_OPS 0x20 +#define STAT_TOTAL_OPS 0x40 + typedef struct test_options_t { uint32_t num_cpu; uint32_t num_event; @@ -24,6 +31,7 @@ typedef struct test_options_t { uint32_t num_burst; uint32_t data_size; uint32_t cache_size; + uint32_t stats_mode; int pool_type; } test_options_t; @@ -61,6 +69,14 @@ static void print_usage(void) " -b, --burst Maximum number of events per operation\n" " -n, --num_burst Number of bursts allocated/freed back-to-back\n" " -s, --data_size Data size in bytes\n" + " -S, --stats_mode Pool statistics usage. Enable counters with combination of these flags:\n" + " 0: no pool statistics (default)\n" + " 0x1: available\n" + " 0x2: cache_available\n" + " 0x4: thread_cache_available\n" + " 0x10: alloc_ops\n" + " 0x20: free_ops\n" + " 0x40: total_ops\n" " -t, --pool_type 0: Buffer pool (default)\n" " 1: Packet pool\n" " -C, --cache_size Pool cache size (per thread)\n" @@ -81,13 +97,14 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) {"burst", required_argument, NULL, 'b'}, {"num_burst", required_argument, NULL, 'n'}, {"data_size", required_argument, NULL, 's'}, + {"stats_mode", required_argument, NULL, 'S'}, {"pool_type", required_argument, NULL, 't'}, {"cache_size", required_argument, NULL, 'C'}, {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0} }; - static const char *shortopts = "+c:e:r:b:n:s:t:C:h"; + static const char *shortopts = "+c:e:r:b:n:s:S:t:C:h"; test_options->num_cpu = 1; test_options->num_event = 1000; @@ -95,6 +112,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) test_options->max_burst = 100; test_options->num_burst = 1; test_options->data_size = 64; + test_options->stats_mode = 0; test_options->pool_type = 0; test_options->cache_size = UINT32_MAX; @@ -123,6 +141,9 @@ static int parse_options(int argc, 
char *argv[], test_options_t *test_options) case 's': test_options->data_size = atoi(optarg); break; + case 'S': + test_options->stats_mode = strtoul(optarg, NULL, 0); + break; case 't': test_options->pool_type = atoi(optarg); break; @@ -186,6 +207,7 @@ static int create_pool(test_global_t *global) odp_pool_capability_t pool_capa; odp_pool_param_t pool_param; odp_pool_t pool; + odp_pool_stats_opt_t stats, stats_capa; uint32_t max_num, max_size, min_cache_size, max_cache_size; test_options_t *test_options = &global->test_options; uint32_t num_event = test_options->num_event; @@ -195,14 +217,30 @@ static int create_pool(test_global_t *global) uint32_t num_cpu = test_options->num_cpu; uint32_t data_size = test_options->data_size; uint32_t cache_size = test_options->cache_size; + uint32_t stats_mode = test_options->stats_mode; int packet_pool = test_options->pool_type; + stats.all = 0; + odp_pool_param_init(&pool_param); if (cache_size == UINT32_MAX) cache_size = packet_pool ? pool_param.pkt.cache_size : pool_param.buf.cache_size; + if (stats_mode & STAT_AVAILABLE) + stats.bit.available = 1; + if (stats_mode & STAT_CACHE) + stats.bit.cache_available = 1; + if (stats_mode & STAT_THR_CACHE) + stats.bit.thread_cache_available = 1; + if (stats_mode & STAT_ALLOC_OPS) + stats.bit.alloc_ops = 1; + if (stats_mode & STAT_FREE_OPS) + stats.bit.free_ops = 1; + if (stats_mode & STAT_TOTAL_OPS) + stats.bit.total_ops = 1; + printf("\nPool performance test\n"); printf(" num cpu %u\n", num_cpu); printf(" num rounds %u\n", num_round); @@ -211,6 +249,7 @@ static int create_pool(test_global_t *global) printf(" num bursts %u\n", num_burst); printf(" data size %u\n", data_size); printf(" cache size %u\n", cache_size); + printf(" stats mode 0x%x\n", stats_mode); printf(" pool type %s\n\n", packet_pool ? 
"packet" : "buffer"); if (odp_pool_capability(&pool_capa)) { @@ -223,11 +262,19 @@ static int create_pool(test_global_t *global) max_size = pool_capa.pkt.max_len; max_cache_size = pool_capa.pkt.max_cache_size; min_cache_size = pool_capa.pkt.min_cache_size; + stats_capa = pool_capa.pkt.stats; } else { max_num = pool_capa.buf.max_num; max_size = pool_capa.buf.max_size; max_cache_size = pool_capa.buf.max_cache_size; min_cache_size = pool_capa.buf.min_cache_size; + stats_capa = pool_capa.buf.stats; + } + + if ((stats_capa.all & stats.all) != stats.all) { + printf("Error: requested statistics not supported (0x%" PRIx64 " / 0x%" PRIx64 ")\n", + stats.all, stats_capa.all); + return -1; } if (cache_size < min_cache_size) { @@ -257,7 +304,6 @@ static int create_pool(test_global_t *global) pool_param.pkt.max_num = num_event; pool_param.pkt.max_len = data_size; pool_param.pkt.cache_size = cache_size; - } else { pool_param.type = ODP_POOL_BUFFER; pool_param.buf.num = num_event; @@ -265,6 +311,8 @@ static int create_pool(test_global_t *global) pool_param.buf.cache_size = cache_size; } + pool_param.stats.all = stats.all; + pool = odp_pool_create("pool perf", &pool_param); if (pool == ODP_POOL_INVALID) { @@ -472,6 +520,56 @@ static int start_workers(test_global_t *global, odp_instance_t instance) return 0; } +static void test_stats_perf(test_global_t *global) +{ + odp_pool_stats_t stats; + odp_time_t t1, t2; + uint64_t nsec; + int i; + int num_thr = global->test_options.num_cpu + 1; /* workers + main thread */ + odp_pool_t pool = global->pool; + double nsec_ave = 0.0; + const int rounds = 1000; + + if (num_thr > ODP_POOL_MAX_THREAD_STATS) + num_thr = ODP_POOL_MAX_THREAD_STATS; + + memset(&stats, 0, sizeof(odp_pool_stats_t)); + stats.thread.first = 0; + stats.thread.last = num_thr - 1; + + t1 = odp_time_local_strict(); + + for (i = 0; i < rounds; i++) { + if (odp_pool_stats(pool, &stats)) { + printf("Error: Stats request failed on round %i\n", i); + break; + } + } + + t2 = 
odp_time_local_strict(); + nsec = odp_time_diff_ns(t2, t1); + + if (i > 0) + nsec_ave = (double)nsec / i; + + printf("Pool statistics:\n"); + printf(" odp_pool_stats() calls %i\n", i); + printf(" ave call latency %.2f nsec\n", nsec_ave); + printf(" num threads %i\n", num_thr); + printf(" alloc_ops %" PRIu64 "\n", stats.alloc_ops); + printf(" free_ops %" PRIu64 "\n", stats.free_ops); + printf(" total_ops %" PRIu64 "\n", stats.total_ops); + printf(" available %" PRIu64 "\n", stats.available); + printf(" cache_available %" PRIu64 "\n", stats.cache_available); + for (i = 0; i < num_thr; i++) { + printf(" thr[%2i] cache_available %" PRIu64 "\n", + i, stats.thread.cache_available[i]); + } + + printf("\n"); +} + static void print_stat(test_global_t *global) { int i, num; @@ -615,6 +713,9 @@ int main(int argc, char **argv) /* Wait workers to exit */ odph_thread_join(global->thread_tbl, global->test_options.num_cpu); + if (global->test_options.stats_mode) + test_stats_perf(global); + print_stat(global); if (odp_pool_destroy(global->pool)) { diff --git a/test/performance/odp_sched_latency.c b/test/performance/odp_sched_latency.c index d4cbfda19c..c8dc74656f 100644 --- a/test/performance/odp_sched_latency.c +++ b/test/performance/odp_sched_latency.c @@ -27,25 +27,8 @@ #define MAX_QUEUES 4096 /**< Maximum number of queues */ #define MAX_GROUPS 64 #define EVENT_POOL_SIZE (1024 * 1024) /**< Event pool size */ -#define TEST_ROUNDS 10 /**< Test rounds for each thread (millions) */ #define MAIN_THREAD 1 /**< Thread ID performing maintenance tasks */ -/* Default values for command line arguments */ -#define SAMPLE_EVENT_PER_PRIO 0 /**< Allocate a separate sample event for - each priority */ -#define HI_PRIO_EVENTS 0 /**< Number of high priority events */ -#define LO_PRIO_EVENTS 32 /**< Number of low priority events */ -#define HI_PRIO_QUEUES 16 /**< Number of high priority queues */ -#define LO_PRIO_QUEUES 64 /**< Number of low priority queues */ -#define WARM_UP_ROUNDS 100 /**< 
Number of warm-up rounds */ - -#define EVENTS_PER_HI_PRIO_QUEUE 0 /**< Alloc HI_PRIO_QUEUES x HI_PRIO_EVENTS - events */ -#define EVENTS_PER_LO_PRIO_QUEUE 1 /**< Alloc LO_PRIO_QUEUES x LO_PRIO_EVENTS - events */ -ODP_STATIC_ASSERT(HI_PRIO_QUEUES <= MAX_QUEUES, "Too many HI priority queues"); -ODP_STATIC_ASSERT(LO_PRIO_QUEUES <= MAX_QUEUES, "Too many LO priority queues"); - #define CACHE_ALIGN_ROUNDUP(x)\ ((ODP_CACHE_LINE_SIZE) * \ (((x) + ODP_CACHE_LINE_SIZE - 1) / (ODP_CACHE_LINE_SIZE))) @@ -70,7 +53,7 @@ typedef enum { /** Test event */ typedef struct { - uint64_t ts; /**< Send timestamp */ + odp_time_t time_stamp; /**< Send timestamp */ event_type_t type; /**< Message type */ int src_idx[NUM_PRIOS]; /**< Source ODP queue */ int prio; /**< Source queue priority */ @@ -89,6 +72,7 @@ typedef struct { struct { int queues; /**< Number of scheduling queues */ int events; /**< Number of events */ + int sample_events; odp_bool_t events_per_queue; /**< Allocate 'queues' x 'events' test events */ } prio[NUM_PRIOS]; @@ -103,6 +87,7 @@ typedef struct { uint64_t tot; /**< Total event latency. Sum of all events. 
*/ uint64_t min; /**< Minimum event latency */ uint64_t max; /**< Maximum event latency */ + uint64_t max_idx; /**< Index of the maximum latency sample event */ } test_stat_t; /** Performance test statistics (per core) */ @@ -298,20 +283,24 @@ static void print_results(test_globals_t *globals) else printf(" LO_PRIO events: %i\n", args->prio[LO_PRIO].events); + printf(" LO_PRIO sample events: %i\n", args->prio[LO_PRIO].sample_events); + printf(" HI_PRIO queues: %i\n", args->prio[HI_PRIO].queues); if (args->prio[HI_PRIO].events_per_queue) printf(" HI_PRIO event per queue: %i\n\n", args->prio[HI_PRIO].events); else - printf(" HI_PRIO events: %i\n\n", args->prio[HI_PRIO].events); + printf(" HI_PRIO events: %i\n", args->prio[HI_PRIO].events); + + printf(" HI_PRIO sample events: %i\n\n", args->prio[HI_PRIO].sample_events); for (i = 0; i < NUM_PRIOS; i++) { memset(&total, 0, sizeof(test_stat_t)); total.min = UINT64_MAX; printf("%s priority\n" - "Thread Avg[ns] Min[ns] Max[ns] Samples Total\n" - "---------------------------------------------------------------\n", + "Thread Avg[ns] Min[ns] Max[ns] Samples Total Max idx\n" + "-----------------------------------------------------------------------\n", i == HI_PRIO ? "HIGH" : "LOW"); for (j = 1; j <= args->cpu_count; j++) { lat = &globals->core_stat[j].prio[i]; @@ -331,11 +320,11 @@ static void print_results(test_globals_t *globals) avg = lat->events ? 
lat->tot / lat->sample_events : 0; printf("%-8d %-10" PRIu64 " %-10" PRIu64 " " - "%-10" PRIu64 " %-10" PRIu64 " %-10" PRIu64 "\n", + "%-10" PRIu64 " %-10" PRIu64 " %-10" PRIu64 " %-10" PRIu64 "\n", j, avg, lat->min, lat->max, lat->sample_events, - lat->events); + lat->events, lat->max_idx); } - printf("---------------------------------------------------------------\n"); + printf("-----------------------------------------------------------------------\n"); if (total.sample_events == 0) { printf("Total N/A\n\n"); continue; @@ -398,9 +387,9 @@ static int join_groups(test_globals_t *globals, int thr) */ static int test_schedule(int thr, test_globals_t *globals) { + odp_time_t time; odp_event_t ev; odp_buffer_t buf; - odp_queue_t src_queue; odp_queue_t dst_queue; uint64_t latency; uint64_t i; @@ -416,8 +405,12 @@ static int test_schedule(int thr, test_globals_t *globals) change_queue = globals->args.forward_mode != EVENT_FORWARD_NONE ? 1 : 0; + odp_barrier_wait(&globals->barrier); + for (i = 0; i < test_rounds; i++) { - ev = odp_schedule(&src_queue, ODP_SCHED_WAIT); + ev = odp_schedule(NULL, ODP_SCHED_WAIT); + + time = odp_time_global_strict(); buf = odp_buffer_from_event(ev); event = odp_buffer_addr(buf); @@ -425,10 +418,12 @@ static int test_schedule(int thr, test_globals_t *globals) stats = &globals->core_stat[thr].prio[event->prio]; if (event->type == SAMPLE) { - latency = odp_time_to_ns(odp_time_global()) - event->ts; + latency = odp_time_to_ns(time) - odp_time_to_ns(event->time_stamp); - if (latency > stats->max) + if (latency > stats->max) { stats->max = latency; + stats->max_idx = stats->sample_events; + } if (latency < stats->min) stats->min = latency; stats->tot += latency; @@ -459,7 +454,7 @@ static int test_schedule(int thr, test_globals_t *globals) dst_queue = globals->queue[event->prio][dst_idx]; if (event->type == SAMPLE) - event->ts = odp_time_to_ns(odp_time_global()); + event->time_stamp = odp_time_global_strict(); if (odp_queue_enq(dst_queue, ev)) { 
ODPH_ERR("[%i] Queue enqueue failed.\n", thr); @@ -472,6 +467,8 @@ static int test_schedule(int thr, test_globals_t *globals) odp_schedule_pause(); while (1) { + odp_queue_t src_queue; + ev = odp_schedule(&src_queue, ODP_SCHED_NO_WAIT); if (ev == ODP_EVENT_INVALID) @@ -509,7 +506,6 @@ static int run_thread(void *arg ODP_UNUSED) test_globals_t *globals; test_args_t *args; int thr; - int sample_events = 0; thr = odp_thread_id(); @@ -528,23 +524,18 @@ static int run_thread(void *arg ODP_UNUSED) args = &globals->args; if (enqueue_events(HI_PRIO, args->prio[HI_PRIO].queues, - args->prio[HI_PRIO].events, 1, + args->prio[HI_PRIO].events, args->prio[HI_PRIO].sample_events, !args->prio[HI_PRIO].events_per_queue, globals)) return -1; - if (!args->prio[HI_PRIO].queues || args->sample_per_prio) - sample_events = 1; - if (enqueue_events(LO_PRIO, args->prio[LO_PRIO].queues, - args->prio[LO_PRIO].events, sample_events, + args->prio[LO_PRIO].events, args->prio[LO_PRIO].sample_events, !args->prio[LO_PRIO].events_per_queue, globals)) return -1; } - odp_barrier_wait(&globals->barrier); - if (test_schedule(thr, globals)) return -1; @@ -562,7 +553,7 @@ static void usage(void) "Usage: ./odp_sched_latency [options]\n" "Optional OPTIONS:\n" " -c, --count CPU count, 0=all available, default=1\n" - " -d, --duration Test duration in scheduling rounds (millions), default=%d, min=1\n" + " -d, --duration Test duration in scheduling rounds (millions), default=10, min=1\n" " -f, --forward-mode Selection of target queue\n" " 0: Random (default)\n" " 1: Incremental\n" @@ -573,14 +564,16 @@ static void usage(void) " -i, --isolate Select if shared or isolated groups are used. Ignored when num_group <= 0.\n" " 0: All queues share groups (default)\n" " 1: Separate groups for high and low priority queues. 
Creates 2xnum_group groups.\n" - " -l, --lo-prio-queues Number of low priority scheduled queues\n" - " -t, --hi-prio-queues Number of high priority scheduled queues\n" - " -m, --lo-prio-events-per-queue Number of events per low priority queue\n" - " -n, --hi-prio-events-per-queue Number of events per high priority queues\n" - " -o, --lo-prio-events Total number of low priority events (overrides the\n" - " number of events per queue)\n" - " -p, --hi-prio-events Total number of high priority events (overrides the\n" - " number of events per queue)\n" + " -l, --lo-prio-queues Number of low priority scheduled queues (default=64)\n" + " -t, --hi-prio-queues Number of high priority scheduled queues (default=16)\n" + " -m, --lo-prio-events-per-queue Number of events per low priority queue (default=32).\n" + " Does not include sample event.\n" + " -n, --hi-prio-events-per-queue Number of events per high priority queues (default=0)\n" + " Does not include sample event.\n" + " -o, --lo-prio-events Total number of low priority events. Overrides the\n" + " number of events per queue, does not include sample event.\n" + " -p, --hi-prio-events Total number of high priority events. Overrides the\n" + " number of events per queue, does not include sample event.\n" " -r --sample-per-prio Allocate a separate sample event for each priority. 
By default\n" " a single sample event is used and its priority is changed after\n" " each processing round.\n" @@ -588,9 +581,8 @@ static void usage(void) " 0: ODP_SCHED_SYNC_PARALLEL (default)\n" " 1: ODP_SCHED_SYNC_ATOMIC\n" " 2: ODP_SCHED_SYNC_ORDERED\n" - " -w, --warm-up Number of warm-up rounds, default=%d, min=1\n" - " -h, --help Display help and exit.\n\n" - , TEST_ROUNDS, WARM_UP_ROUNDS); + " -w, --warm-up Number of warm-up rounds, default=100, min=1\n" + " -h, --help Display help and exit.\n\n"); } /** @@ -631,16 +623,18 @@ static void parse_args(int argc, char *argv[], test_args_t *args) args->forward_mode = EVENT_FORWARD_RAND; args->num_group = 0; args->isolate = 0; - args->test_rounds = TEST_ROUNDS; - args->warm_up_rounds = WARM_UP_ROUNDS; + args->test_rounds = 10; + args->warm_up_rounds = 100; args->sync_type = ODP_SCHED_SYNC_PARALLEL; - args->sample_per_prio = SAMPLE_EVENT_PER_PRIO; - args->prio[LO_PRIO].queues = LO_PRIO_QUEUES; - args->prio[HI_PRIO].queues = HI_PRIO_QUEUES; - args->prio[LO_PRIO].events = LO_PRIO_EVENTS; - args->prio[HI_PRIO].events = HI_PRIO_EVENTS; - args->prio[LO_PRIO].events_per_queue = EVENTS_PER_LO_PRIO_QUEUE; - args->prio[HI_PRIO].events_per_queue = EVENTS_PER_HI_PRIO_QUEUE; + args->sample_per_prio = 0; + args->prio[LO_PRIO].queues = 64; + args->prio[HI_PRIO].queues = 16; + args->prio[LO_PRIO].events = 32; + args->prio[HI_PRIO].events = 0; + args->prio[LO_PRIO].events_per_queue = 1; + args->prio[HI_PRIO].events_per_queue = 0; + args->prio[LO_PRIO].sample_events = 0; + args->prio[HI_PRIO].sample_events = 1; while (1) { opt = getopt_long(argc, argv, shortopts, longopts, &long_index); @@ -737,6 +731,9 @@ static void parse_args(int argc, char *argv[], test_args_t *args) ODPH_ERR("Too many groups. 
Max supported %i.\n", MAX_GROUPS); exit(EXIT_FAILURE); } + + if (args->prio[HI_PRIO].queues == 0 || args->sample_per_prio) + args->prio[LO_PRIO].sample_events = 1; } static void randomize_queues(odp_queue_t queues[], uint32_t num, uint64_t *seed) diff --git a/test/performance/odp_stress.c b/test/performance/odp_stress.c new file mode 100644 index 0000000000..15b44c113f --- /dev/null +++ b/test/performance/odp_stress.c @@ -0,0 +1,857 @@ +/* Copyright (c) 2022, Nokia + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +typedef struct test_options_t { + uint32_t num_cpu; + uint64_t period_ns; + uint64_t rounds; + uint64_t mem_size; + int mode; + int group_mode; + +} test_options_t; + +typedef struct test_stat_t { + uint64_t rounds; + uint64_t tot_nsec; + uint64_t work_nsec; + +} test_stat_t; + +typedef struct test_stat_sum_t { + uint64_t rounds; + uint64_t tot_nsec; + uint64_t work_nsec; + +} test_stat_sum_t; + +typedef struct thread_arg_t { + void *global; + int worker_idx; + +} thread_arg_t; + +typedef struct test_global_t { + test_options_t test_options; + odp_atomic_u32_t exit_test; + odp_barrier_t barrier; + odp_cpumask_t cpumask; + odp_timer_pool_t timer_pool; + odp_pool_t tmo_pool; + uint64_t period_ticks; + uint8_t *worker_mem; + odp_timer_t timer[ODP_THREAD_COUNT_MAX]; + odp_queue_t tmo_queue[ODP_THREAD_COUNT_MAX]; + odp_schedule_group_t group[ODP_THREAD_COUNT_MAX]; + odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX]; + test_stat_t stat[ODP_THREAD_COUNT_MAX]; + thread_arg_t thread_arg[ODP_THREAD_COUNT_MAX]; + test_stat_sum_t stat_sum; + +} test_global_t; + +test_global_t *test_global; + +static void print_usage(void) +{ + printf("\n" + "Stress test options:\n" + "\n" + " -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default: 1\n" + " -p, --period_ns Timeout period in nsec. 
Default: 1 sec\n" + " -r, --rounds Number of timeout rounds. Default: 10\n" + " -m, --mode Select test mode. Default: 1\n" + " 0: No stress, just wait for timeouts\n" + " 1: Memcpy\n" + " -s, --mem_size Memory size per worker in bytes. Default: 2048\n" + " -g, --group_mode Select schedule group mode: Default: 1\n" + " 0: Use GROUP_ALL group. Scheduler load balances timeout events.\n" + " 1: Create a group per CPU. Dedicated timeout event per CPU.\n" + " -h, --help This help\n" + "\n"); +} + +static int parse_options(int argc, char *argv[], test_options_t *test_options) +{ + int opt; + int long_index; + int ret = 0; + + static const struct option longopts[] = { + {"num_cpu", required_argument, NULL, 'c'}, + {"period_ns", required_argument, NULL, 'p'}, + {"rounds", required_argument, NULL, 'r'}, + {"mode", required_argument, NULL, 'm'}, + {"mem_size", required_argument, NULL, 's'}, + {"group_mode", required_argument, NULL, 'g'}, + {"help", no_argument, NULL, 'h'}, + {NULL, 0, NULL, 0} + }; + + static const char *shortopts = "+c:p:r:m:s:g:h"; + + test_options->num_cpu = 1; + test_options->period_ns = 1000 * ODP_TIME_MSEC_IN_NS; + test_options->rounds = 10; + test_options->mode = 1; + test_options->mem_size = 2048; + test_options->group_mode = 1; + + while (1) { + opt = getopt_long(argc, argv, shortopts, longopts, &long_index); + + if (opt == -1) + break; + + switch (opt) { + case 'c': + test_options->num_cpu = atoi(optarg); + break; + case 'p': + test_options->period_ns = atoll(optarg); + break; + case 'r': + test_options->rounds = atoll(optarg); + break; + case 'm': + test_options->mode = atoi(optarg); + break; + case 's': + test_options->mem_size = atoll(optarg); + break; + case 'g': + test_options->group_mode = atoi(optarg); + break; + case 'h': + /* fall through */ + default: + print_usage(); + ret = -1; + break; + } + } + + if (test_options->mode) { + if (test_options->mem_size < 2) { + ODPH_ERR("Too small memory size\n"); + return -1; + } + } + + return ret; +} 
+ +static int set_num_cpu(test_global_t *global) +{ + int ret; + test_options_t *test_options = &global->test_options; + int num_cpu = test_options->num_cpu; + + /* One thread used for the main thread */ + if (num_cpu < 0 || num_cpu > ODP_THREAD_COUNT_MAX - 1) { + ODPH_ERR("Bad number of workers. Maximum is %i.\n", ODP_THREAD_COUNT_MAX - 1); + return -1; + } + + ret = odp_cpumask_default_worker(&global->cpumask, num_cpu); + + if (num_cpu && ret != num_cpu) { + ODPH_ERR("Too many workers. Max supported %i\n.", ret); + return -1; + } + + /* Zero: all available workers */ + if (num_cpu == 0) { + num_cpu = ret; + test_options->num_cpu = num_cpu; + } + + odp_barrier_init(&global->barrier, num_cpu + 1); + + return 0; +} + +static int join_group(test_global_t *global, int worker_idx, int thr) +{ + odp_thrmask_t thrmask; + odp_schedule_group_t group; + + odp_thrmask_zero(&thrmask); + odp_thrmask_set(&thrmask, thr); + group = global->group[worker_idx]; + + if (odp_schedule_group_join(group, &thrmask)) { + ODPH_ERR("Thread %i failed to join group %i\n", thr, worker_idx); + return -1; + } + + return 0; +} + +static int worker_thread(void *arg) +{ + int thr, timer_ret; + uint32_t exit_test; + odp_event_t ev; + odp_timeout_t tmo; + odp_timer_t timer; + uint64_t tot_nsec, work_sum, max_nsec; + odp_timer_start_t start_param; + odp_time_t t1, t2, max_time; + odp_time_t work_t1, work_t2; + uint8_t *src, *dst; + thread_arg_t *thread_arg = arg; + int worker_idx = thread_arg->worker_idx; + test_global_t *global = thread_arg->global; + test_options_t *test_options = &global->test_options; + int mode = test_options->mode; + uint64_t mem_size = test_options->mem_size; + uint64_t copy_size = mem_size / 2; + uint64_t rounds = 0; + int ret = 0; + uint32_t done = 0; + uint64_t wait = ODP_SCHED_WAIT; + + thr = odp_thread_id(); + max_nsec = 2 * test_options->rounds * test_options->period_ns; + max_time = odp_time_local_from_ns(max_nsec); + printf("Thread %i starting on CPU %i\n", thr, 
odp_cpu_id()); + + if (test_options->group_mode == 0) { + /* Timeout events are load balanced. Using this + * period to poll exit status. */ + wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS); + } else { + if (join_group(global, worker_idx, thr)) { + /* Join failed, exit after barrier */ + wait = ODP_SCHED_NO_WAIT; + done = 1; + } + } + + if (mode) { + src = global->worker_mem + worker_idx * mem_size; + dst = src + copy_size; + } + + start_param.tick_type = ODP_TIMER_TICK_REL; + start_param.tick = global->period_ticks; + + /* Start all workers at the same time */ + odp_barrier_wait(&global->barrier); + + work_sum = 0; + t1 = odp_time_local(); + max_time = odp_time_sum(t1, max_time); + + while (1) { + ev = odp_schedule(NULL, wait); + + exit_test = odp_atomic_load_u32(&global->exit_test); + exit_test += done; + + if (ev == ODP_EVENT_INVALID) { + odp_time_t cur_time = odp_time_local(); + + if (odp_time_cmp(cur_time, max_time) > 0) + exit_test += 1; + + if (exit_test) { + /* Exit loop without schedule context */ + break; + } + + continue; + } + + rounds++; + + if (rounds < test_options->rounds) { + tmo = odp_timeout_from_event(ev); + timer = odp_timeout_timer(tmo); + start_param.tmo_ev = ev; + + timer_ret = odp_timer_start(timer, &start_param); + + if (timer_ret != ODP_TIMER_SUCCESS) { + ODPH_ERR("Timer start failed (%" PRIu64 ")\n", rounds); + done = 1; + } + } else { + done = 1; + } + + /* Do work */ + if (mode) { + work_t1 = odp_time_local(); + + memcpy(dst, src, copy_size); + + work_t2 = odp_time_local(); + work_sum += odp_time_diff_ns(work_t2, work_t1); + } + + if (done) { + /* Stop timer and do not wait events */ + wait = ODP_SCHED_NO_WAIT; + odp_event_free(ev); + } + } + + t2 = odp_time_local(); + tot_nsec = odp_time_diff_ns(t2, t1); + + /* Update stats*/ + global->stat[thr].rounds = rounds; + global->stat[thr].tot_nsec = tot_nsec; + global->stat[thr].work_nsec = work_sum; + + return ret; +} + +static int start_workers(test_global_t *global, 
odp_instance_t instance) +{ + odph_thread_common_param_t thr_common; + int i, ret; + test_options_t *test_options = &global->test_options; + int num_cpu = test_options->num_cpu; + odph_thread_param_t thr_param[num_cpu]; + + memset(global->thread_tbl, 0, sizeof(global->thread_tbl)); + odph_thread_common_param_init(&thr_common); + + thr_common.instance = instance; + thr_common.cpumask = &global->cpumask; + + for (i = 0; i < num_cpu; i++) { + odph_thread_param_init(&thr_param[i]); + thr_param[i].start = worker_thread; + thr_param[i].arg = &global->thread_arg[i]; + thr_param[i].thr_type = ODP_THREAD_WORKER; + } + + ret = odph_thread_create(global->thread_tbl, &thr_common, thr_param, num_cpu); + + if (ret != num_cpu) { + ODPH_ERR("Thread create failed %i\n", ret); + return -1; + } + + return 0; +} + +static int create_timers(test_global_t *global) +{ + odp_timer_capability_t timer_capa; + odp_timer_res_capability_t timer_res_capa; + odp_timer_pool_param_t timer_pool_param; + odp_timer_pool_t tp; + odp_pool_param_t pool_param; + odp_pool_t pool; + double duration; + test_options_t *test_options = &global->test_options; + uint32_t num_cpu = test_options->num_cpu; + uint64_t period_ns = test_options->period_ns; + uint64_t res_ns = period_ns / 1000; + + if (odp_timer_capability(ODP_CLOCK_DEFAULT, &timer_capa)) { + ODPH_ERR("Timer capability failed\n"); + return -1; + } + + if (timer_capa.queue_type_sched == 0) { + ODPH_ERR("Timer does not support sched queues\n"); + return -1; + } + + memset(&timer_res_capa, 0, sizeof(odp_timer_res_capability_t)); + timer_res_capa.max_tmo = 2 * period_ns; + if (odp_timer_res_capability(ODP_CLOCK_DEFAULT, &timer_res_capa)) { + ODPH_ERR("Timer resolution capability failed. 
Too long period.\n"); + return -1; + } + + if (res_ns < timer_res_capa.res_ns) + res_ns = timer_res_capa.res_ns; + + duration = test_options->rounds * (double)period_ns / ODP_TIME_SEC_IN_NS; + + printf(" num timers %u\n", num_cpu); + printf(" resolution %" PRIu64 " nsec\n", res_ns); + printf(" period %" PRIu64 " nsec\n", period_ns); + printf(" test duration %.2f sec\n", duration); + if (test_options->group_mode == 0) + printf(" force stop after %.2f sec\n", 2 * duration); + printf("\n"); + + odp_pool_param_init(&pool_param); + pool_param.type = ODP_POOL_TIMEOUT; + pool_param.tmo.num = num_cpu; + + pool = odp_pool_create("Timeout pool", &pool_param); + global->tmo_pool = pool; + if (pool == ODP_POOL_INVALID) { + ODPH_ERR("Pool create failed\n"); + return -1; + } + + odp_timer_pool_param_init(&timer_pool_param); + timer_pool_param.res_ns = res_ns; + timer_pool_param.min_tmo = period_ns / 2; + timer_pool_param.max_tmo = 2 * period_ns; + timer_pool_param.num_timers = 2 * num_cpu; /* extra for stop events */ + timer_pool_param.clk_src = ODP_CLOCK_DEFAULT; + + tp = odp_timer_pool_create("Stress timers", &timer_pool_param); + global->timer_pool = tp; + if (tp == ODP_TIMER_POOL_INVALID) { + ODPH_ERR("Timer pool create failed\n"); + return -1; + } + + odp_timer_pool_start(); + + global->period_ticks = odp_timer_ns_to_tick(tp, period_ns); + + return 0; +} + +static int create_queues(test_global_t *global) +{ + odp_schedule_capability_t sched_capa; + odp_thrmask_t thrmask; + odp_queue_param_t queue_param; + uint32_t i; + test_options_t *test_options = &global->test_options; + uint32_t num_cpu = test_options->num_cpu; + + if (odp_schedule_capability(&sched_capa)) { + ODPH_ERR("Schedule capability failed\n"); + return -1; + } + + if (test_options->group_mode) { + if ((sched_capa.max_groups - 1) < num_cpu) { + ODPH_ERR("Too many workers. 
Not enough schedule groups.\n"); + return -1; + } + + odp_thrmask_zero(&thrmask); + + /* A group per worker thread */ + for (i = 0; i < num_cpu; i++) { + global->group[i] = odp_schedule_group_create(NULL, &thrmask); + + if (global->group[i] == ODP_SCHED_GROUP_INVALID) { + ODPH_ERR("Schedule group create failed (%u)\n", i); + return -1; + } + } + } + + odp_queue_param_init(&queue_param); + queue_param.type = ODP_QUEUE_TYPE_SCHED; + queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL; + queue_param.sched.group = ODP_SCHED_GROUP_ALL; + + for (i = 0; i < num_cpu; i++) { + if (test_options->group_mode) + queue_param.sched.group = global->group[i]; + + global->tmo_queue[i] = odp_queue_create(NULL, &queue_param); + + if (global->tmo_queue[i] == ODP_QUEUE_INVALID) { + ODPH_ERR("Timeout dest queue create failed (%u)\n", i); + return -1; + } + } + + return 0; +} + +static int start_timers(test_global_t *global) +{ + odp_timer_start_t start_param; + uint32_t i; + test_options_t *test_options = &global->test_options; + uint32_t num_cpu = test_options->num_cpu; + odp_timeout_t tmo[num_cpu]; + odp_timer_t timer[num_cpu]; + + for (i = 0; i < num_cpu; i++) { + tmo[i] = odp_timeout_alloc(global->tmo_pool); + + if (tmo[i] == ODP_TIMEOUT_INVALID) { + ODPH_ERR("Timeout alloc failed (%u)\n", i); + return -1; + } + } + + for (i = 0; i < num_cpu; i++) { + timer[i] = odp_timer_alloc(global->timer_pool, global->tmo_queue[i], NULL); + + if (timer[i] == ODP_TIMER_INVALID) { + ODPH_ERR("Timer alloc failed (%u)\n", i); + return -1; + } + + global->timer[i] = timer[i]; + } + + start_param.tick_type = ODP_TIMER_TICK_REL; + start_param.tick = global->period_ticks; + + for (i = 0; i < num_cpu; i++) { + start_param.tmo_ev = odp_timeout_to_event(tmo[i]); + + if (odp_timer_start(timer[i], &start_param) != ODP_TIMER_SUCCESS) { + ODPH_ERR("Timer start failed (%u)\n", i); + return -1; + } + } + + return 0; +} + +static void destroy_timers(test_global_t *global) +{ + uint32_t i; + odp_event_t ev; + 
test_options_t *test_options = &global->test_options; + uint32_t num_cpu = test_options->num_cpu; + + for (i = 0; i < num_cpu; i++) { + odp_timer_t timer = global->timer[i]; + + if (timer == ODP_TIMER_INVALID) + continue; + + ev = odp_timer_free(timer); + if (ev != ODP_EVENT_INVALID) + odp_event_free(ev); + } + + if (global->timer_pool != ODP_TIMER_POOL_INVALID) + odp_timer_pool_destroy(global->timer_pool); + + for (i = 0; i < num_cpu; i++) { + odp_queue_t queue = global->tmo_queue[i]; + + if (queue == ODP_QUEUE_INVALID) + continue; + + if (odp_queue_destroy(queue)) + ODPH_ERR("Queue destroy failed (%u)\n", i); + } + + if (test_options->group_mode) { + for (i = 0; i < num_cpu; i++) { + odp_schedule_group_t group = global->group[i]; + + if (group == ODP_SCHED_GROUP_INVALID) + continue; + + if (odp_schedule_group_destroy(group)) + ODPH_ERR("Schedule group destroy failed (%u)\n", i); + } + } + + if (global->tmo_pool != ODP_POOL_INVALID) + odp_pool_destroy(global->tmo_pool); +} + +static void sig_handler(int signo) +{ + (void)signo; + + if (test_global == NULL) + return; + + odp_atomic_add_u32(&test_global->exit_test, 1); +} + +static void stop_workers(test_global_t *global) +{ + uint32_t i; + odp_timeout_t tmo; + odp_event_t ev; + odp_queue_t queue; + test_options_t *test_options = &global->test_options; + uint32_t num_cpu = test_options->num_cpu; + + odp_atomic_add_u32(&test_global->exit_test, 1); + + for (i = 0; i < num_cpu; i++) { + queue = global->tmo_queue[i]; + if (queue == ODP_QUEUE_INVALID) + continue; + + tmo = odp_timeout_alloc(global->tmo_pool); + + if (tmo == ODP_TIMEOUT_INVALID) + continue; + + ev = odp_timeout_to_event(tmo); + if (odp_queue_enq(queue, ev)) { + ODPH_ERR("Enqueue failed %u\n", i); + odp_event_free(ev); + } + } +} + +static void sum_stat(test_global_t *global) +{ + uint32_t i; + test_options_t *test_options = &global->test_options; + uint32_t num_cpu = test_options->num_cpu; + test_stat_sum_t *sum = &global->stat_sum; + + memset(sum, 0, 
sizeof(test_stat_sum_t)); + + for (i = 1; i < num_cpu + 1 ; i++) { + sum->rounds += global->stat[i].rounds; + sum->tot_nsec += global->stat[i].tot_nsec; + sum->work_nsec += global->stat[i].work_nsec; + } +} + +static void print_stat(test_global_t *global) +{ + uint32_t i; + test_options_t *test_options = &global->test_options; + uint32_t num_cpu = test_options->num_cpu; + int mode = test_options->mode; + test_stat_sum_t *sum = &global->stat_sum; + double sec_ave, work_ave, perc; + double round_ave = 0.0; + double copy_ave = 0.0; + double copy_tot = 0.0; + double cpu_load = 0.0; + const double mega = 1000000.0; + const double giga = 1000000000.0; + uint32_t num = 0; + + if (num_cpu == 0) + return; + + sec_ave = (sum->tot_nsec / giga) / num_cpu; + work_ave = (sum->work_nsec / giga) / num_cpu; + + printf("\n"); + printf("CPU load from work (percent) per thread:\n"); + printf("----------------------------------------------\n"); + printf(" 1 2 3 4 5 6 7 8 9 10"); + + for (i = 1; i < num_cpu + 1; i++) { + if (global->stat[i].tot_nsec == 0) + continue; + + if ((num % 10) == 0) + printf("\n "); + + perc = 100.0 * ((double)global->stat[i].work_nsec) / global->stat[i].tot_nsec; + + printf("%6.2f ", perc); + num++; + } + + if (sec_ave != 0.0) { + round_ave = (double)sum->rounds / num_cpu; + cpu_load = 100.0 * (work_ave / sec_ave); + + if (mode) { + uint64_t copy_bytes = sum->rounds * test_options->mem_size / 2; + + copy_ave = copy_bytes / (sum->work_nsec / giga); + copy_tot = copy_ave * num_cpu; + } + } + + printf("\n\n"); + printf("TOTAL (%i workers)\n", num_cpu); + printf(" ave time: %.2f sec\n", sec_ave); + printf(" ave work: %.2f sec\n", work_ave); + printf(" ave CPU load: %.2f\n", cpu_load); + printf(" ave rounds per sec: %.2f\n", round_ave / sec_ave); + printf(" ave copy speed: %.2f MB/sec\n", copy_ave / mega); + printf(" total copy speed: %.2f MB/sec\n", copy_tot / mega); + printf("\n"); +} + +int main(int argc, char **argv) +{ + odph_helper_options_t helper_options; + 
odp_instance_t instance; + odp_init_t init; + odp_shm_t shm, shm_global; + odp_schedule_config_t sched_config; + test_global_t *global; + test_options_t *test_options; + int i, mode; + uint32_t num_cpu; + uint64_t mem_size; + odp_shm_t shm_work = ODP_SHM_INVALID; + + signal(SIGINT, sig_handler); + + /* Let helper collect its own arguments (e.g. --odph_proc) */ + argc = odph_parse_options(argc, argv); + if (odph_options(&helper_options)) { + ODPH_ERR("Reading ODP helper options failed.\n"); + exit(EXIT_FAILURE); + } + + odp_init_param_init(&init); + init.mem_model = helper_options.mem_model; + + if (odp_init_global(&instance, &init, NULL)) { + ODPH_ERR("Global init failed.\n"); + exit(EXIT_FAILURE); + } + + if (odp_init_local(instance, ODP_THREAD_CONTROL)) { + ODPH_ERR("Local init failed.\n"); + exit(EXIT_FAILURE); + } + + shm = odp_shm_reserve("Stress global", sizeof(test_global_t), ODP_CACHE_LINE_SIZE, 0); + shm_global = shm; + if (shm == ODP_SHM_INVALID) { + ODPH_ERR("SHM reserve failed.\n"); + exit(EXIT_FAILURE); + } + + global = odp_shm_addr(shm); + if (global == NULL) { + ODPH_ERR("SHM addr failed\n"); + exit(EXIT_FAILURE); + } + test_global = global; + + memset(global, 0, sizeof(test_global_t)); + odp_atomic_init_u32(&global->exit_test, 0); + + global->timer_pool = ODP_TIMER_POOL_INVALID; + global->tmo_pool = ODP_POOL_INVALID; + + for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) { + global->timer[i] = ODP_TIMER_INVALID; + global->tmo_queue[i] = ODP_QUEUE_INVALID; + global->group[i] = ODP_SCHED_GROUP_INVALID; + + global->thread_arg[i].global = global; + global->thread_arg[i].worker_idx = i; + } + + if (parse_options(argc, argv, &global->test_options)) + exit(EXIT_FAILURE); + + test_options = &global->test_options; + mode = test_options->mode; + + odp_sys_info_print(); + + odp_schedule_config_init(&sched_config); + sched_config.sched_group.all = 1; + sched_config.sched_group.control = 0; + sched_config.sched_group.worker = 0; + + odp_schedule_config(&sched_config); + 
+ if (set_num_cpu(global)) + exit(EXIT_FAILURE); + + num_cpu = test_options->num_cpu; + + /* Memory for workers */ + if (mode) { + mem_size = test_options->mem_size * num_cpu; + + shm = odp_shm_reserve("Test memory", mem_size, ODP_CACHE_LINE_SIZE, 0); + shm_work = shm; + if (shm == ODP_SHM_INVALID) { + ODPH_ERR("SHM reserve failed.\n"); + exit(EXIT_FAILURE); + } + + global->worker_mem = odp_shm_addr(shm); + if (global->worker_mem == NULL) { + ODPH_ERR("SHM addr failed\n"); + exit(EXIT_FAILURE); + } + + memset(global->worker_mem, 0, mem_size); + } + + printf("\n"); + printf("Test parameters\n"); + printf(" num workers %u\n", num_cpu); + printf(" mode %i\n", mode); + printf(" group mode %i\n", test_options->group_mode); + printf(" mem size per worker %" PRIu64 " bytes\n", test_options->mem_size); + + if (create_timers(global)) + exit(EXIT_FAILURE); + + if (create_queues(global)) + exit(EXIT_FAILURE); + + /* Start worker threads */ + start_workers(global, instance); + + /* Wait until all workers are ready */ + odp_barrier_wait(&global->barrier); + + if (start_timers(global)) { + /* Stop all workers, if some timer did not start */ + ODPH_ERR("Timers did not start. 
Stopping workers.\n"); + stop_workers(global); + } + + /* Wait workers to exit */ + odph_thread_join(global->thread_tbl, num_cpu); + + sum_stat(global); + + print_stat(global); + + destroy_timers(global); + + if (mode) { + if (odp_shm_free(shm_work)) { + ODPH_ERR("SHM free failed.\n"); + exit(EXIT_FAILURE); + } + } + + if (odp_shm_free(shm_global)) { + ODPH_ERR("SHM free failed.\n"); + exit(EXIT_FAILURE); + } + + if (odp_term_local()) { + ODPH_ERR("Term local failed.\n"); + exit(EXIT_FAILURE); + } + + if (odp_term_global(instance)) { + ODPH_ERR("Term global failed.\n"); + exit(EXIT_FAILURE); + } + + return 0; +} diff --git a/test/validation/api/atomic/atomic.c b/test/validation/api/atomic/atomic.c index d4329bc6b6..cbdd219fc2 100644 --- a/test/validation/api/atomic/atomic.c +++ b/test/validation/api/atomic/atomic.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include diff --git a/test/validation/api/barrier/barrier.c b/test/validation/api/barrier/barrier.c index ce52fd2d7e..e0665b7cf5 100644 --- a/test/validation/api/barrier/barrier.c +++ b/test/validation/api/barrier/barrier.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include diff --git a/test/validation/api/buffer/buffer.c b/test/validation/api/buffer/buffer.c index c3484e14aa..2d9632342b 100644 --- a/test/validation/api/buffer/buffer.c +++ b/test/validation/api/buffer/buffer.c @@ -75,6 +75,8 @@ static void test_pool_alloc_free(const odp_pool_param_t *param) odp_pool_print(pool); for (i = 0; i < num; i++) { + odp_buffer_t buf; + buffer[i] = odp_buffer_alloc(pool); if (buffer[i] == ODP_BUFFER_INVALID) @@ -88,6 +90,10 @@ static void test_pool_alloc_free(const odp_pool_param_t *param) ev = odp_buffer_to_event(buffer[i]); CU_ASSERT(odp_buffer_from_event(ev) == buffer[i]); + odp_buffer_to_event_multi(&buffer[i], &ev, 1); + odp_buffer_from_event_multi(&buf, &ev, 1); + CU_ASSERT(buf == buffer[i]); + if (odp_event_type(ev) != ODP_EVENT_BUFFER) wrong_type = true; if 
(odp_event_subtype(ev) != ODP_EVENT_NO_SUBTYPE) @@ -147,12 +153,20 @@ static void test_pool_alloc_free_multi(const odp_pool_param_t *param) ret = 0; for (i = 0; i < num; i += ret) { + odp_buffer_t buf[BURST]; + odp_event_t event[BURST]; + ret = odp_buffer_alloc_multi(pool, &buffer[i], BURST); CU_ASSERT(ret >= 0); CU_ASSERT(ret <= BURST); if (ret <= 0) break; + + odp_buffer_to_event_multi(&buffer[i], event, ret); + odp_buffer_from_event_multi(buf, event, ret); + for (int j = 0; j < ret; j++) + CU_ASSERT(buf[j] == buffer[i + j]); } num_buf = i; diff --git a/test/validation/api/classification/odp_classification_basic.c b/test/validation/api/classification/odp_classification_basic.c index f914ea2ec6..f7aa96e525 100644 --- a/test/validation/api/classification/odp_classification_basic.c +++ b/test/validation/api/classification/odp_classification_basic.c @@ -53,7 +53,9 @@ static void classification_test_create_cos(void) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos = odp_cls_cos_create(NULL, &cls_param); CU_ASSERT(odp_cos_to_u64(cos) != odp_cos_to_u64(ODP_COS_INVALID)); @@ -124,7 +126,9 @@ static void classification_test_destroy_cos(void) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos = odp_cls_cos_create(name, &cls_param); CU_ASSERT_FATAL(cos != ODP_COS_INVALID); @@ -172,7 +176,9 @@ static void classification_test_create_pmr_match(void) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos = odp_cls_cos_create("pmr_match", &cls_param); CU_ASSERT(cos != ODP_COS_INVALID); @@ -229,7 +235,9 @@ static void classification_test_cos_set_queue(void) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue; +#if 
ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos_queue = odp_cls_cos_create(cosname, &cls_param); CU_ASSERT_FATAL(cos_queue != ODP_COS_INVALID); @@ -271,7 +279,9 @@ static void classification_test_cos_set_pool(void) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos = odp_cls_cos_create(cosname, &cls_param); CU_ASSERT_FATAL(cos != ODP_COS_INVALID); @@ -289,6 +299,8 @@ static void classification_test_cos_set_pool(void) odp_pool_destroy(cos_pool); } +#if ODP_DEPRECATED_API + static void classification_test_cos_set_drop(void) { int retval; @@ -324,6 +336,8 @@ static void classification_test_cos_set_drop(void) odp_queue_destroy(queue); } +#endif + static void classification_test_pmr_composite_create(void) { odp_pmr_t pmr_composite; @@ -360,7 +374,9 @@ static void classification_test_pmr_composite_create(void) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos = odp_cls_cos_create("pmr_match", &cls_param); CU_ASSERT(cos != ODP_COS_INVALID); @@ -446,7 +462,9 @@ odp_testinfo_t classification_suite_basic[] = { ODP_TEST_INFO(classification_test_destroy_cos), ODP_TEST_INFO(classification_test_create_pmr_match), ODP_TEST_INFO(classification_test_cos_set_queue), +#if ODP_DEPRECATED_API ODP_TEST_INFO(classification_test_cos_set_drop), +#endif ODP_TEST_INFO(classification_test_cos_set_pool), ODP_TEST_INFO(classification_test_pmr_composite_create), ODP_TEST_INFO_CONDITIONAL(classification_test_create_cos_with_hash_queues, diff --git a/test/validation/api/classification/odp_classification_test_pmr.c b/test/validation/api/classification/odp_classification_test_pmr.c index 280e525af6..0b29783c03 100644 --- a/test/validation/api/classification/odp_classification_test_pmr.c +++ 
b/test/validation/api/classification/odp_classification_test_pmr.c @@ -77,7 +77,9 @@ void configure_default_cos(odp_pktio_t pktio, odp_cos_t *cos, odp_cls_cos_param_init(&cls_param); cls_param.pool = default_pool; cls_param.queue = default_queue; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif default_cos = odp_cls_cos_create(cosname, &cls_param); CU_ASSERT(default_cos != ODP_COS_INVALID); @@ -151,7 +153,9 @@ static void classification_test_pktin_classifier_flag(void) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos = odp_cls_cos_create(cosname, &cls_param); CU_ASSERT(cos != ODP_COS_INVALID); @@ -245,7 +249,9 @@ static void _classification_test_pmr_term_tcp_dport(int num_pkt) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos = odp_cls_cos_create(cosname, &cls_param); CU_ASSERT(cos != ODP_COS_INVALID); @@ -418,7 +424,9 @@ static void test_pmr(const odp_pmr_param_t *pmr_param, odp_packet_t pkt, odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos = odp_cls_cos_create("PMR test cos", &cls_param); CU_ASSERT_FATAL(cos != ODP_COS_INVALID); @@ -689,7 +697,9 @@ static void classification_test_pmr_term_dmac(void) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos = odp_cls_cos_create(cosname, &cls_param); CU_ASSERT_FATAL(cos != ODP_COS_INVALID); @@ -1027,7 +1037,9 @@ static void classification_test_pmr_pool_set(void) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos = odp_cls_cos_create(cosname, 
&cls_param); CU_ASSERT_FATAL(cos != ODP_COS_INVALID); @@ -1126,7 +1138,9 @@ static void classification_test_pmr_queue_set(void) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos = odp_cls_cos_create(cosname, &cls_param); CU_ASSERT_FATAL(cos != ODP_COS_INVALID); @@ -1427,7 +1441,9 @@ static void test_pmr_series(const int num_udp, int marking) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue_ip; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos_ip = odp_cls_cos_create("cos_ip", &cls_param); CU_ASSERT_FATAL(cos_ip != ODP_COS_INVALID); @@ -1469,7 +1485,9 @@ static void test_pmr_series(const int num_udp, int marking) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool; cls_param.queue = queue_udp[i]; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif cos_udp[i] = odp_cls_cos_create(name, &cls_param); CU_ASSERT_FATAL(cos_udp[i] != ODP_COS_INVALID); diff --git a/test/validation/api/classification/odp_classification_tests.c b/test/validation/api/classification/odp_classification_tests.c index 4511fc1d71..962885c06b 100644 --- a/test/validation/api/classification/odp_classification_tests.c +++ b/test/validation/api/classification/odp_classification_tests.c @@ -19,7 +19,10 @@ static odp_pool_t pool_default; static odp_pktio_t pktio_loop; static odp_pktio_capability_t pktio_capa; static odp_cls_testcase_u tc; + +#ifdef ODP_DEPRECATED static int global_num_l2_qos; +#endif #define NUM_COS_PMR_CHAIN 2 #define NUM_COS_DEFAULT 1 @@ -259,7 +262,9 @@ void configure_cls_pmr_chain(odp_bool_t enable_pktv) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool_list[CLS_PMR_CHAIN_SRC]; cls_param.queue = queue_list[CLS_PMR_CHAIN_SRC]; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif if (enable_pktv) { cls_param.vector.enable = true; @@ -289,7 +294,9 @@ void 
configure_cls_pmr_chain(odp_bool_t enable_pktv) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool_list[CLS_PMR_CHAIN_DST]; cls_param.queue = queue_list[CLS_PMR_CHAIN_DST]; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif if (enable_pktv) { cls_param.vector.enable = true; @@ -409,7 +416,9 @@ void configure_pktio_default_cos(odp_bool_t enable_pktv) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool_list[CLS_DEFAULT]; cls_param.queue = queue_list[CLS_DEFAULT]; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif if (enable_pktv) { cls_param.vector.enable = true; @@ -623,7 +632,9 @@ void configure_pktio_error_cos(odp_bool_t enable_pktv) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool_list[CLS_ERROR]; cls_param.queue = queue_list[CLS_ERROR]; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif if (enable_pktv) { cls_param.vector.enable = true; @@ -698,6 +709,8 @@ static void classification_test_pktio_set_headroom(void) CU_ASSERT(retval < 0); } +#ifdef ODP_DEPRECATED + void configure_cos_with_l2_priority(odp_bool_t enable_pktv) { uint8_t num_qos = CLS_L2_QOS_MAX; @@ -798,6 +811,8 @@ void test_cos_with_l2_priority(odp_bool_t enable_pktv) } } +#endif + void configure_pmr_cos(odp_bool_t enable_pktv) { uint16_t val; @@ -827,7 +842,9 @@ void configure_pmr_cos(odp_bool_t enable_pktv) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool_list[CLS_PMR]; cls_param.queue = queue_list[CLS_PMR]; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif if (enable_pktv) { cls_param.vector.enable = true; @@ -910,7 +927,9 @@ void configure_pktio_pmr_composite(odp_bool_t enable_pktv) odp_cls_cos_param_init(&cls_param); cls_param.pool = pool_list[CLS_PMR_SET]; cls_param.queue = queue_list[CLS_PMR_SET]; +#if ODP_DEPRECATED_API cls_param.drop_policy = ODP_COS_DROP_POOL; +#endif if (enable_pktv) { cls_param.vector.enable = true; @@ -1010,11 +1029,13 @@ static void 
classification_test_pktio_configure_common(odp_bool_t enable_pktv) tc.pmr_chain = 1; num_cos -= NUM_COS_PMR_CHAIN; } +#ifdef ODP_DEPRECATED if (num_cos >= NUM_COS_L2_PRIO && TEST_L2_QOS) { configure_cos_with_l2_priority(enable_pktv); tc.l2_priority = 1; num_cos -= NUM_COS_L2_PRIO; } +#endif if (num_cos >= NUM_COS_PMR && TEST_PMR) { configure_pmr_cos(enable_pktv); tc.pmr_cos = 1; @@ -1049,8 +1070,10 @@ static void classification_test_pktio_test_common(odp_bool_t enable_pktv) test_pktio_error_cos(enable_pktv); if (tc.pmr_chain && TEST_PMR_CHAIN) test_cls_pmr_chain(enable_pktv); +#ifdef ODP_DEPRECATED if (tc.l2_priority && TEST_L2_QOS) test_cos_with_l2_priority(enable_pktv); +#endif if (tc.pmr_cos && TEST_PMR) test_pmr_cos(enable_pktv); if (tc.pmr_composite_cos && TEST_PMR_SET) diff --git a/test/validation/api/classification/odp_classification_testsuites.h b/test/validation/api/classification/odp_classification_testsuites.h index 8fa43099d4..06e98d4cbb 100644 --- a/test/validation/api/classification/odp_classification_testsuites.h +++ b/test/validation/api/classification/odp_classification_testsuites.h @@ -40,7 +40,9 @@ typedef union odp_cls_testcase { uint32_t drop_cos:1; uint32_t error_cos:1; uint32_t pmr_chain:1; +#ifdef ODP_DEPRECATED uint32_t l2_priority:1; +#endif uint32_t pmr_cos:1; uint32_t pmr_composite_cos:1; }; diff --git a/test/validation/api/crypto/odp_crypto_test_inp.c b/test/validation/api/crypto/odp_crypto_test_inp.c index 150a470ec1..10f1b5ee29 100644 --- a/test/validation/api/crypto/odp_crypto_test_inp.c +++ b/test/validation/api/crypto/odp_crypto_test_inp.c @@ -7,7 +7,6 @@ #include #include -#include #include #include "test_vectors.h" diff --git a/test/validation/api/ipsec/ipsec.c b/test/validation/api/ipsec/ipsec.c index c98afae4e1..11860bf9b4 100644 --- a/test/validation/api/ipsec/ipsec.c +++ b/test/validation/api/ipsec/ipsec.c @@ -16,11 +16,24 @@ #include "test_vectors.h" #include "reass_test_vectors.h" +#define EVENT_BUFFER_SIZE 3 + +struct 
buffered_event_s { + odp_queue_t from; + odp_event_t event; +}; + +static struct buffered_event_s sched_ev_buffer[EVENT_BUFFER_SIZE]; struct suite_context_s suite_context; static odp_ipsec_capability_t capa; +static int sched_ev_buffer_tail; +odp_bool_t sa_expiry_notified; + #define PKT_POOL_NUM 64 #define EVENT_WAIT_TIME ODP_TIME_SEC_IN_NS +#define STATUS_EVENT_WAIT_TIME ODP_TIME_MSEC_IN_NS +#define SCHED_EVENT_RETRY_COUNT 2 #define PACKET_USER_PTR ((void *)0x1212fefe) #define IPSEC_SA_CTX ((void *)0xfefefafa) @@ -101,11 +114,68 @@ static int pktio_start(odp_pktio_t pktio, odp_bool_t in, odp_bool_t out) return 1; } -static odp_event_t sched_queue_deq(uint64_t wait_ns) +static int sched_event_buffer_add(odp_queue_t from, odp_event_t event) +{ + if (sched_ev_buffer_tail + 1 == EVENT_BUFFER_SIZE) + return -ENOMEM; + + sched_ev_buffer[sched_ev_buffer_tail].from = from; + sched_ev_buffer[sched_ev_buffer_tail].event = event; + sched_ev_buffer_tail++; + + return 0; +} + +static odp_event_t sched_event_buffer_get(odp_queue_t from) +{ + odp_event_t ev; + int i, j; + + if (odp_queue_type(from) == ODP_QUEUE_TYPE_PLAIN) + return ODP_EVENT_INVALID; + + /* Look for a matching entry */ + for (i = 0; i < sched_ev_buffer_tail; i++) + if (sched_ev_buffer[i].from == from) + break; + + /* Remove entry from buffer */ + if (i != sched_ev_buffer_tail) { + ev = sched_ev_buffer[i].event; + + for (j = 1; i + j < sched_ev_buffer_tail; j++) + sched_ev_buffer[i + j - 1] = sched_ev_buffer[i + j]; + + sched_ev_buffer_tail--; + } else { + ev = ODP_EVENT_INVALID; + } + + return ev; +} + +static odp_event_t sched_queue_deq(odp_queue_t queue, uint64_t wait_ns) { uint64_t wait = odp_schedule_wait_time(wait_ns); + odp_event_t ev = ODP_EVENT_INVALID; + odp_queue_t from; + int retry = 0; - return odp_schedule(NULL, wait); + /* Check if buffered events are available */ + ev = sched_event_buffer_get(queue); + if (ODP_EVENT_INVALID != ev) + return ev; + + do { + ev = odp_schedule(&from, wait); + + if ((ev 
!= ODP_EVENT_INVALID) && (from != queue)) { + CU_ASSERT_FATAL(0 == sched_event_buffer_add(from, ev)); + ev = ODP_EVENT_INVALID; + } + } while (ev == ODP_EVENT_INVALID && (++retry < SCHED_EVENT_RETRY_COUNT)); + + return ev; } static odp_event_t plain_queue_deq(odp_queue_t queue, uint64_t wait_ns) @@ -131,7 +201,7 @@ static odp_event_t recv_event(odp_queue_t queue, uint64_t wait_ns) if (odp_queue_type(queue) == ODP_QUEUE_TYPE_PLAIN) event = plain_queue_deq(queue, wait_ns); else - event = sched_queue_deq(wait_ns); + event = sched_queue_deq(queue, wait_ns); return event; } @@ -370,6 +440,50 @@ void ipsec_sa_param_fill(odp_ipsec_sa_param_t *param, param->lifetime.hard_limit.packets = 10000 * 1000; } +static void ipsec_status_event_handle(odp_event_t ev_status, + odp_ipsec_sa_t sa, + enum ipsec_test_sa_expiry sa_expiry) +{ + odp_ipsec_status_t status = { + .id = 0, + .sa = ODP_IPSEC_SA_INVALID, + .result = 0, + .warn.all = 0, + }; + + CU_ASSERT_FATAL(ODP_EVENT_INVALID != ev_status); + CU_ASSERT_EQUAL(1, odp_event_is_valid(ev_status)); + CU_ASSERT_EQUAL_FATAL(ODP_EVENT_IPSEC_STATUS, odp_event_type(ev_status)); + + CU_ASSERT_EQUAL(0, odp_ipsec_status(&status, ev_status)); + CU_ASSERT_EQUAL(ODP_IPSEC_STATUS_WARN, status.id); + CU_ASSERT_EQUAL(sa, status.sa); + CU_ASSERT_EQUAL(0, status.result); + + if (IPSEC_TEST_EXPIRY_IGNORED != sa_expiry) { + if (IPSEC_TEST_EXPIRY_SOFT_PKT == sa_expiry) { + CU_ASSERT_EQUAL(1, status.warn.soft_exp_packets); + sa_expiry_notified = true; + } else if (IPSEC_TEST_EXPIRY_SOFT_BYTE == sa_expiry) { + CU_ASSERT_EQUAL(1, status.warn.soft_exp_bytes); + sa_expiry_notified = true; + } + } + + odp_event_free(ev_status); +} + +void ipsec_status_event_get(odp_ipsec_sa_t sa, + enum ipsec_test_sa_expiry sa_expiry) +{ + uint64_t wait_time = (sa_expiry == IPSEC_TEST_EXPIRY_IGNORED) ? 
0 : STATUS_EVENT_WAIT_TIME; + odp_event_t ev; + + ev = recv_event(suite_context.queue, wait_time); + if (ODP_EVENT_INVALID != ev) + ipsec_status_event_handle(ev, sa, sa_expiry); +} + void ipsec_sa_destroy(odp_ipsec_sa_t sa) { odp_event_t event; @@ -698,6 +812,45 @@ static int ipsec_process_in(const ipsec_test_part *part, return num_out; } +static int ipsec_check_sa_expiry(enum ipsec_test_sa_expiry sa_expiry, + odp_ipsec_packet_result_t *result) +{ + if (sa_expiry == IPSEC_TEST_EXPIRY_IGNORED) + return 0; + + if (!sa_expiry_notified) { + if (sa_expiry == IPSEC_TEST_EXPIRY_SOFT_PKT) { + if (result->status.warn.soft_exp_packets) + sa_expiry_notified = true; + } else if (sa_expiry == IPSEC_TEST_EXPIRY_SOFT_BYTE) { + if (result->status.warn.soft_exp_bytes) + sa_expiry_notified = true; + } else if (sa_expiry == IPSEC_TEST_EXPIRY_HARD_PKT) { + if (result->status.error.hard_exp_packets) + sa_expiry_notified = true; + + return -1; + } else if (sa_expiry == IPSEC_TEST_EXPIRY_HARD_BYTE) { + if (result->status.error.hard_exp_bytes) + sa_expiry_notified = true; + + return -1; + } + } else { + if (sa_expiry == IPSEC_TEST_EXPIRY_HARD_PKT) { + CU_ASSERT(result->status.error.hard_exp_packets); + + return -1; + } else if (sa_expiry == IPSEC_TEST_EXPIRY_HARD_BYTE) { + CU_ASSERT(result->status.error.hard_exp_bytes); + + return -1; + } + } + + return 0; +} + static int ipsec_send_out_one(const ipsec_test_part *part, odp_ipsec_sa_t sa, odp_packet_t *pkto) @@ -824,18 +977,39 @@ static int ipsec_send_out_one(const ipsec_test_part *part, pkto[i] = odp_packet_from_event(ev); CU_ASSERT_FATAL(pkto[i] != ODP_PACKET_INVALID); + + if (part->out[i].sa_expiry != IPSEC_TEST_EXPIRY_NONE) + ipsec_status_event_get(sa, part->out[i].sa_expiry); + i++; continue; } ev = recv_event(suite_context.queue, 0); if (ODP_EVENT_INVALID != ev) { + odp_event_type_t ev_type; + CU_ASSERT(odp_event_is_valid(ev) == 1); + ev_type = odp_event_types(ev, &subtype); + + if ((ODP_EVENT_IPSEC_STATUS == ev_type) && + 
part->out[i].sa_expiry != IPSEC_TEST_EXPIRY_NONE) { + ipsec_status_event_handle(ev, sa, part->out[i].sa_expiry); + continue; + } + CU_ASSERT_EQUAL(ODP_EVENT_PACKET, - odp_event_types(ev, &subtype)); + ev_type); CU_ASSERT_EQUAL(ODP_EVENT_PACKET_IPSEC, subtype); - CU_ASSERT(part->out[i].status.error.all); + + /* In the case of SA hard expiry tests, hard expiry error bits are + * expected to be set. The exact error bits expected to be set based + * on sa_expiry is checked eventually in ipsec_check_sa_expiry() + * from the caller of this function. + */ + if (part->out[i].sa_expiry == IPSEC_TEST_EXPIRY_NONE) + CU_ASSERT(part->out[i].status.error.all); pkto[i] = odp_ipsec_packet_from_event(ev); CU_ASSERT_FATAL(pkto[i] != ODP_PACKET_INVALID); @@ -924,6 +1098,8 @@ static void verify_in(const ipsec_test_part *part, ODP_IPSEC_OP_MODE_INLINE, result.flag.inline_mode); CU_ASSERT_EQUAL(sa, result.sa); + CU_ASSERT_EQUAL(part->out[i].status.warn.all, + result.status.warn.all); if (ODP_IPSEC_SA_INVALID != sa) CU_ASSERT_EQUAL(IPSEC_SA_CTX, odp_ipsec_sa_context(sa)); @@ -999,6 +1175,11 @@ int ipsec_check_out(const ipsec_test_part *part, odp_ipsec_sa_t sa, } else { /* IPsec packet */ CU_ASSERT_EQUAL(0, odp_ipsec_result(&result, pkto[i])); + + if (part->out[i].sa_expiry != IPSEC_TEST_EXPIRY_NONE) + if (ipsec_check_sa_expiry(part->out[i].sa_expiry, &result) != 0) + return num_out; + CU_ASSERT_EQUAL(part->out[i].status.error.all, result.status.error.all); if (0 == result.status.error.all) diff --git a/test/validation/api/ipsec/ipsec.h b/test/validation/api/ipsec/ipsec.h index a2bb478a3b..1c17693f7f 100644 --- a/test/validation/api/ipsec/ipsec.h +++ b/test/validation/api/ipsec/ipsec.h @@ -72,6 +72,15 @@ enum ipsec_test_stats { IPSEC_TEST_STATS_AUTH_ERR, }; +enum ipsec_test_sa_expiry { + IPSEC_TEST_EXPIRY_NONE = 0, + IPSEC_TEST_EXPIRY_IGNORED, + IPSEC_TEST_EXPIRY_SOFT_BYTE, + IPSEC_TEST_EXPIRY_SOFT_PKT, + IPSEC_TEST_EXPIRY_HARD_BYTE, + IPSEC_TEST_EXPIRY_HARD_PKT, +}; + typedef struct { 
odp_bool_t lookup; odp_bool_t inline_hdr_in_packet; @@ -101,9 +110,12 @@ typedef struct { * differs from that of input test packet (pkt_in). */ uint32_t orig_ip_len; + enum ipsec_test_sa_expiry sa_expiry; } out[MAX_FRAGS]; } ipsec_test_part; +extern odp_bool_t sa_expiry_notified; + void ipsec_sa_param_fill(odp_ipsec_sa_param_t *param, odp_ipsec_dir_t dir, odp_ipsec_protocol_t proto, @@ -151,5 +163,7 @@ int ipsec_check_test_sa_update_seq_num(void); int ipsec_check_esp_aes_gcm_128_reass_ipv4(void); int ipsec_check_esp_aes_gcm_128_reass_ipv6(void); int ipsec_check_esp_null_aes_xcbc(void); +void ipsec_status_event_get(odp_ipsec_sa_t sa, + enum ipsec_test_sa_expiry sa_expiry); #endif diff --git a/test/validation/api/ipsec/ipsec_test_out.c b/test/validation/api/ipsec/ipsec_test_out.c index 236997735f..bb318edadd 100644 --- a/test/validation/api/ipsec/ipsec_test_out.c +++ b/test/validation/api/ipsec/ipsec_test_out.c @@ -1593,6 +1593,138 @@ static void test_test_sa_update_seq_num(void) printf("\n "); } +#define SOFT_LIMIT_PKT_CNT 1024 +#define HARD_LIMIT_PKT_CNT 2048 +#define DELTA_PKT_CNT 320 + +static void test_out_ipv4_esp_sa_expiry(enum ipsec_test_sa_expiry expiry) +{ + int byte_count_per_packet = pkt_ipv4_icmp_0.len - pkt_ipv4_icmp_0.l3_offset; + uint32_t src = IPV4ADDR(10, 0, 11, 2); + uint32_t dst = IPV4ADDR(10, 0, 22, 2); + odp_ipsec_tunnel_param_t out_tunnel; + odp_ipsec_sa_param_t param_out; + int i, inc, limit, delta; + uint64_t soft_limit_byte; + uint64_t hard_limit_byte; + uint64_t soft_limit_pkt; + uint64_t hard_limit_pkt; + odp_ipsec_sa_t out_sa; + + switch (expiry) { + case IPSEC_TEST_EXPIRY_SOFT_PKT: + soft_limit_pkt = SOFT_LIMIT_PKT_CNT; + hard_limit_pkt = HARD_LIMIT_PKT_CNT; + soft_limit_byte = 0; + hard_limit_byte = 0; + delta = DELTA_PKT_CNT; + limit = soft_limit_pkt; + inc = 1; + break; + case IPSEC_TEST_EXPIRY_HARD_PKT: + soft_limit_pkt = SOFT_LIMIT_PKT_CNT; + hard_limit_pkt = HARD_LIMIT_PKT_CNT; + soft_limit_byte = 0; + hard_limit_byte = 0; + delta 
= DELTA_PKT_CNT; + limit = hard_limit_pkt; + inc = 1; + break; + case IPSEC_TEST_EXPIRY_SOFT_BYTE: + soft_limit_pkt = 0; + hard_limit_pkt = 0; + soft_limit_byte = byte_count_per_packet * SOFT_LIMIT_PKT_CNT; + hard_limit_byte = byte_count_per_packet * HARD_LIMIT_PKT_CNT; + delta = byte_count_per_packet * DELTA_PKT_CNT; + limit = soft_limit_byte; + inc = byte_count_per_packet; + break; + case IPSEC_TEST_EXPIRY_HARD_BYTE: + soft_limit_pkt = 0; + hard_limit_pkt = 0; + soft_limit_byte = byte_count_per_packet * SOFT_LIMIT_PKT_CNT; + hard_limit_byte = byte_count_per_packet * HARD_LIMIT_PKT_CNT; + delta = byte_count_per_packet * DELTA_PKT_CNT; + limit = hard_limit_byte; + inc = byte_count_per_packet; + break; + default: + return; + } + + memset(&out_tunnel, 0, sizeof(odp_ipsec_tunnel_param_t)); + + out_tunnel.type = ODP_IPSEC_TUNNEL_IPV4; + out_tunnel.ipv4.src_addr = &src; + out_tunnel.ipv4.dst_addr = &dst; + + ipsec_sa_param_fill(¶m_out, ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP, + 0x4a2cbfe7, &out_tunnel, + ODP_CIPHER_ALG_AES_CBC, &key_a5_128, + ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160, + NULL, NULL); + + param_out.lifetime.soft_limit.bytes = soft_limit_byte; + param_out.lifetime.hard_limit.bytes = hard_limit_byte; + param_out.lifetime.soft_limit.packets = soft_limit_pkt; + param_out.lifetime.hard_limit.packets = hard_limit_pkt; + + out_sa = odp_ipsec_sa_create(¶m_out); + CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, out_sa); + + ipsec_test_part test_out = { + .pkt_in = &pkt_ipv4_icmp_0, + .num_pkt = 1, + .out = { + { .status.warn.all = 0, + .status.error.all = 0, + .l3_type = ODP_PROTO_L3_TYPE_IPV4, + .l4_type = ODP_PROTO_L4_TYPE_ESP, + }, + }, + }; + + test_out.out[0].sa_expiry = IPSEC_TEST_EXPIRY_IGNORED; + + for (i = 0; i < limit - delta; i += inc) + ipsec_check_out_one(&test_out, out_sa); + + sa_expiry_notified = false; + test_out.out[0].sa_expiry = expiry; + + for (; i <= limit && !sa_expiry_notified; i += inc) + ipsec_check_out_one(&test_out, out_sa); + + 
CU_ASSERT(sa_expiry_notified); + + for (; i <= limit + delta; i += inc) + ipsec_check_out_one(&test_out, out_sa); + + ipsec_sa_destroy(out_sa); +} + +static void test_out_ipv4_esp_sa_pkt_expiry(void) +{ + printf("\n IPv4 IPsec SA packet soft expiry"); + test_out_ipv4_esp_sa_expiry(IPSEC_TEST_EXPIRY_SOFT_PKT); + + printf("\n IPv4 IPsec SA packet hard expiry"); + test_out_ipv4_esp_sa_expiry(IPSEC_TEST_EXPIRY_HARD_PKT); + + printf("\n"); +} + +static void test_out_ipv4_esp_sa_byte_expiry(void) +{ + printf("\n IPv4 IPsec SA byte soft expiry"); + test_out_ipv4_esp_sa_expiry(IPSEC_TEST_EXPIRY_SOFT_BYTE); + + printf("\n IPv4 IPsec SA byte hard expiry"); + test_out_ipv4_esp_sa_expiry(IPSEC_TEST_EXPIRY_HARD_BYTE); + + printf("\n"); +} + static void ipsec_test_capability(void) { odp_ipsec_capability_t capa; @@ -1900,6 +2032,10 @@ odp_testinfo_t ipsec_out_suite[] = { ipsec_check_esp_null_aes_xcbc), ODP_TEST_INFO_CONDITIONAL(test_sa_info, ipsec_check_esp_aes_cbc_128_sha1), + ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_sa_pkt_expiry, + ipsec_check_esp_aes_cbc_128_sha1), + ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_sa_byte_expiry, + ipsec_check_esp_aes_cbc_128_sha1), ODP_TEST_INFO_CONDITIONAL(test_test_sa_update_seq_num, ipsec_check_test_sa_update_seq_num), ODP_TEST_INFO(test_esp_out_in_all_basic), diff --git a/test/validation/api/lock/lock.c b/test/validation/api/lock/lock.c index bf9318e76a..729994d66b 100644 --- a/test/validation/api/lock/lock.c +++ b/test/validation/api/lock/lock.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c index 2eb1a5a284..446411b22d 100644 --- a/test/validation/api/pktio/pktio.c +++ b/test/validation/api/pktio/pktio.c @@ -1605,6 +1605,7 @@ static void test_defaults(uint8_t fill) CU_ASSERT_EQUAL(qp_in.hash_enable, 0); CU_ASSERT_EQUAL(qp_in.hash_proto.all_bits, 0); CU_ASSERT_EQUAL(qp_in.num_queues, 1); + CU_ASSERT_EQUAL(qp_in.queue_size[0], 0); 
CU_ASSERT_EQUAL(qp_in.queue_param.enq_mode, ODP_QUEUE_OP_MT); CU_ASSERT_EQUAL(qp_in.queue_param.sched.prio, odp_schedule_default_prio()); CU_ASSERT_EQUAL(qp_in.queue_param.sched.sync, ODP_SCHED_SYNC_PARALLEL); @@ -1749,6 +1750,8 @@ static void pktio_test_pktio_config(void) CU_ASSERT(!config.reassembly.en_ipv6); CU_ASSERT(config.reassembly.max_wait_time == 0); CU_ASSERT(config.reassembly.max_num_frags == 2); + CU_ASSERT(config.flow_control.pause_rx == ODP_PKTIO_LINK_PAUSE_OFF); + CU_ASSERT(config.flow_control.pause_tx == ODP_PKTIO_LINK_PAUSE_OFF); /* Indicate packet refs might be used */ config.pktout.bit.no_packet_refs = 0; @@ -1822,11 +1825,13 @@ static void pktio_test_link_info(void) link_info.duplex == ODP_PKTIO_LINK_DUPLEX_HALF || link_info.duplex == ODP_PKTIO_LINK_DUPLEX_FULL); CU_ASSERT(link_info.pause_rx == ODP_PKTIO_LINK_PAUSE_UNKNOWN || + link_info.pause_rx == ODP_PKTIO_LINK_PAUSE_OFF || link_info.pause_rx == ODP_PKTIO_LINK_PAUSE_ON || - link_info.pause_rx == ODP_PKTIO_LINK_PAUSE_OFF); + link_info.pause_rx == ODP_PKTIO_LINK_PFC_ON); CU_ASSERT(link_info.pause_tx == ODP_PKTIO_LINK_PAUSE_UNKNOWN || + link_info.pause_tx == ODP_PKTIO_LINK_PAUSE_OFF || link_info.pause_tx == ODP_PKTIO_LINK_PAUSE_ON || - link_info.pause_tx == ODP_PKTIO_LINK_PAUSE_OFF); + link_info.pause_tx == ODP_PKTIO_LINK_PFC_ON); CU_ASSERT(link_info.status == ODP_PKTIO_LINK_STATUS_UNKNOWN || link_info.status == ODP_PKTIO_LINK_STATUS_UP || link_info.status == ODP_PKTIO_LINK_STATUS_DOWN); @@ -1838,6 +1843,242 @@ static void pktio_test_link_info(void) } } +static int pktio_check_flow_control(int pfc, int rx) +{ + odp_pktio_t pktio; + odp_pktio_capability_t capa; + odp_pktio_param_t pktio_param; + int ret; + + odp_pktio_param_init(&pktio_param); + pktio_param.in_mode = ODP_PKTIN_MODE_SCHED; + + pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param); + if (pktio == ODP_PKTIO_INVALID) + return ODP_TEST_INACTIVE; + + ret = odp_pktio_capability(pktio, &capa); + (void)odp_pktio_close(pktio); + + 
if (ret < 0) + return ODP_TEST_INACTIVE; + + if (pfc == 0 && rx == 1 && capa.flow_control.pause_rx == 1) + return ODP_TEST_ACTIVE; + + if (pfc == 1 && rx == 1 && capa.flow_control.pfc_rx == 1) + return ODP_TEST_ACTIVE; + + if (pfc == 0 && rx == 0 && capa.flow_control.pause_tx == 1) + return ODP_TEST_ACTIVE; + + if (pfc == 1 && rx == 0 && capa.flow_control.pfc_tx == 1) + return ODP_TEST_ACTIVE; + + return ODP_TEST_INACTIVE; +} + +static int pktio_check_pause_rx(void) +{ + return pktio_check_flow_control(0, 1); +} + +static int pktio_check_pause_tx(void) +{ + return pktio_check_flow_control(0, 0); +} + +static int pktio_check_pause_both(void) +{ + int rx = pktio_check_pause_rx(); + int tx = pktio_check_pause_tx(); + + if (rx == ODP_TEST_ACTIVE && tx == ODP_TEST_ACTIVE) + return ODP_TEST_ACTIVE; + + return ODP_TEST_INACTIVE; +} + +static int pktio_check_pfc_rx(void) +{ + return pktio_check_flow_control(1, 1); +} + +static int pktio_check_pfc_tx(void) +{ + return pktio_check_flow_control(1, 0); +} + +static int pktio_check_pfc_both(void) +{ + int rx = pktio_check_pfc_rx(); + int tx = pktio_check_pfc_tx(); + + if (rx == ODP_TEST_ACTIVE && tx == ODP_TEST_ACTIVE) + return ODP_TEST_ACTIVE; + + return ODP_TEST_INACTIVE; +} + +static odp_cos_t set_default_cos(odp_pktio_t pktio, odp_queue_t queue) +{ + odp_cls_cos_param_t cos_param; + odp_cos_t cos; + int ret; + + odp_cls_cos_param_init(&cos_param); + cos_param.queue = queue; + cos_param.pool = pool[0]; + + cos = odp_cls_cos_create("Default CoS", &cos_param); + CU_ASSERT_FATAL(cos != ODP_COS_INVALID); + + ret = odp_pktio_default_cos_set(pktio, cos); + CU_ASSERT_FATAL(ret == 0); + + return cos; +} + +static odp_cos_t create_pfc_cos(odp_cos_t default_cos, odp_queue_t queue, odp_pmr_t *pmr_out) +{ + odp_cls_cos_param_t cos_param; + odp_cos_t cos; + odp_pmr_param_t pmr_param; + odp_pmr_t pmr; + uint8_t pcp = 1; + uint8_t mask = 0x7; + + /* Setup a CoS to control generation of PFC frame generation. 
PFC for the VLAN + * priority level is generated when queue/pool resource usage gets above 80%. */ + odp_cls_cos_param_init(&cos_param); + cos_param.queue = queue; + cos_param.pool = pool[0]; + cos_param.bp.enable = 1; + cos_param.bp.threshold.type = ODP_THRESHOLD_PERCENT; + cos_param.bp.threshold.percent.max = 80; + cos_param.bp.pfc_level = pcp; + + cos = odp_cls_cos_create("PFC CoS", &cos_param); + CU_ASSERT_FATAL(cos != ODP_COS_INVALID); + + odp_cls_pmr_param_init(&pmr_param); + pmr_param.term = ODP_PMR_VLAN_PCP_0; + pmr_param.match.value = &pcp; + pmr_param.match.mask = &mask; + pmr_param.val_sz = 1; + + pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos); + CU_ASSERT_FATAL(pmr != ODP_PMR_INVALID); + + *pmr_out = pmr; + + return cos; +} + +static void pktio_config_flow_control(int pfc, int rx, int tx) +{ + odp_pktio_t pktio; + odp_pktio_config_t config; + int ret; + odp_cos_t default_cos = ODP_COS_INVALID; + odp_cos_t cos = ODP_COS_INVALID; + odp_pmr_t pmr = ODP_PMR_INVALID; + odp_queue_t queue = ODP_QUEUE_INVALID; + odp_pktio_link_pause_t mode = ODP_PKTIO_LINK_PAUSE_ON; + + pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED, ODP_PKTOUT_MODE_DIRECT); + CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID); + + odp_pktio_config_init(&config); + + if (pfc) + mode = ODP_PKTIO_LINK_PFC_ON; + + if (rx) + config.flow_control.pause_rx = mode; + + if (tx) + config.flow_control.pause_tx = mode; + + ret = odp_pktio_config(pktio, &config); + CU_ASSERT_FATAL(ret == 0); + + if (pfc && tx) { + /* Enable classifier for PFC backpressure configuration. Overrides previous + * pktin queue config. 
*/ + odp_pktin_queue_param_t pktin_param; + + odp_pktin_queue_param_init(&pktin_param); + + pktin_param.classifier_enable = 1; + + ret = odp_pktin_queue_config(pktio, &pktin_param); + CU_ASSERT_FATAL(ret == 0); + } + + ret = odp_pktio_start(pktio); + CU_ASSERT(ret == 0); + + if (pfc && tx) { + odp_queue_param_t qparam; + + odp_queue_param_init(&qparam); + qparam.type = ODP_QUEUE_TYPE_SCHED; + + queue = odp_queue_create("CoS queue", &qparam); + CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID); + + default_cos = set_default_cos(pktio, queue); + + cos = create_pfc_cos(default_cos, queue, &pmr); + } + + if (pmr != ODP_PMR_INVALID) + odp_cls_pmr_destroy(pmr); + + if (cos != ODP_COS_INVALID) + odp_cos_destroy(cos); + + if (default_cos != ODP_COS_INVALID) + odp_cos_destroy(default_cos); + + if (queue != ODP_QUEUE_INVALID) + odp_queue_destroy(queue); + + CU_ASSERT(odp_pktio_stop(pktio) == 0); + CU_ASSERT(odp_pktio_close(pktio) == 0); +} + +static void pktio_test_enable_pause_rx(void) +{ + pktio_config_flow_control(0, 1, 0); +} + +static void pktio_test_enable_pause_tx(void) +{ + pktio_config_flow_control(0, 0, 1); +} + +static void pktio_test_enable_pause_both(void) +{ + pktio_config_flow_control(0, 1, 1); +} + +static void pktio_test_enable_pfc_rx(void) +{ + pktio_config_flow_control(1, 1, 0); +} + +static void pktio_test_enable_pfc_tx(void) +{ + pktio_config_flow_control(1, 0, 1); +} + +static void pktio_test_enable_pfc_both(void) +{ + pktio_config_flow_control(1, 1, 1); +} + static void pktio_test_pktin_queue_config_direct(void) { odp_pktio_t pktio; @@ -1959,28 +2200,33 @@ static void pktio_test_pktin_queue_config_queue(void) odp_pktio_capability_t capa; odp_pktin_queue_param_t queue_param; odp_pktin_queue_t pktin_queues[MAX_QUEUES]; - odp_queue_t in_queues[MAX_QUEUES]; int num_queues; - pktio = create_pktio(0, ODP_PKTIN_MODE_QUEUE, ODP_PKTOUT_MODE_DIRECT); + pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT); CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID); 
CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 && capa.max_input_queues > 0); num_queues = capa.max_input_queues; + CU_ASSERT_FATAL(num_queues <= ODP_PKTIN_MAX_QUEUES); + + CU_ASSERT(capa.min_input_queue_size <= capa.max_input_queue_size); odp_pktin_queue_param_init(&queue_param); queue_param.hash_enable = (num_queues > 1) ? 1 : 0; queue_param.hash_proto.proto.ipv4_udp = 1; queue_param.num_queues = num_queues; + for (int i = 0; i < num_queues; i++) + queue_param.queue_size[i] = capa.max_input_queue_size; + CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0); - CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES) - == num_queues); - CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) < 0); + CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) == num_queues); queue_param.num_queues = 1; + queue_param.queue_size[0] = capa.min_input_queue_size; + CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0); queue_param.num_queues = capa.max_input_queues + 1; @@ -4927,6 +5173,12 @@ odp_testinfo_t pktio_suite_unsegmented[] = { pktio_check_pktout_compl_plain_queue), ODP_TEST_INFO_CONDITIONAL(pktio_test_pktout_compl_sched_queue, pktio_check_pktout_compl_sched_queue), + ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pause_rx, pktio_check_pause_rx), + ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pause_tx, pktio_check_pause_tx), + ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pause_both, pktio_check_pause_both), + ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pfc_rx, pktio_check_pfc_rx), + ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pfc_tx, pktio_check_pfc_tx), + ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pfc_both, pktio_check_pfc_both), ODP_TEST_INFO_NULL }; diff --git a/test/validation/api/shmem/shmem.c b/test/validation/api/shmem/shmem.c index a10e7d1d98..86d070a4ac 100644 --- a/test/validation/api/shmem/shmem.c +++ b/test/validation/api/shmem/shmem.c @@ -261,6 +261,118 @@ static void shmem_test_reserve(void) 
CU_ASSERT(odp_shm_free(shm) == 0); } +static void shmem_test_info(void) +{ + odp_shm_t shm; + void *addr; + int ret; + uint32_t i; + uint64_t sum_len; + uintptr_t next; + odp_shm_info_t info; + const char *name = "info_test"; + uint32_t num_seg = 32; + uint64_t size = 4 * 1024 * 1024; + uint64_t align = 64; + int support_pa = 0; + int support_iova = 0; + + if (_global_shm_capa.max_size && _global_shm_capa.max_size < size) + size = _global_shm_capa.max_size; + + if (_global_shm_capa.max_align < align) + align = _global_shm_capa.max_align; + + shm = odp_shm_reserve(name, size, align, 0); + CU_ASSERT_FATAL(shm != ODP_SHM_INVALID); + + addr = odp_shm_addr(shm); + CU_ASSERT(addr != NULL); + + if (addr) + memset(addr, 0, size); + + memset(&info, 0, sizeof(odp_shm_info_t)); + ret = odp_shm_info(shm, &info); + + CU_ASSERT_FATAL(ret == 0); + CU_ASSERT(strcmp(name, info.name) == 0); + CU_ASSERT(info.addr == addr); + CU_ASSERT(info.size == size); + CU_ASSERT(info.page_size > 0); + CU_ASSERT(info.flags == 0); + CU_ASSERT(info.num_seg > 0); + + /* Limit number of segments as it may get large with small page sizes */ + if (info.num_seg < num_seg) + num_seg = info.num_seg; + + /* all segments */ + odp_shm_segment_info_t seginfo_a[num_seg]; + + memset(seginfo_a, 0, num_seg * sizeof(odp_shm_segment_info_t)); + + ret = odp_shm_segment_info(shm, 0, num_seg, seginfo_a); + CU_ASSERT_FATAL(ret == 0); + + CU_ASSERT(seginfo_a[0].addr == (uintptr_t)addr); + + sum_len = 0; + next = 0; + + printf("\n\n"); + printf("SHM segment info\n"); + printf("%3s %16s %16s %16s %16s\n", "idx", "addr", "iova", "pa", "len"); + + for (i = 0; i < num_seg; i++) { + printf("%3u %16" PRIxPTR " %16" PRIx64 " %16" PRIx64 " %16" PRIu64 "\n", + i, seginfo_a[i].addr, seginfo_a[i].iova, seginfo_a[i].pa, seginfo_a[i].len); + + CU_ASSERT(seginfo_a[i].addr != 0); + CU_ASSERT(seginfo_a[i].len > 0); + + if (next) { + CU_ASSERT(seginfo_a[i].addr == next); + next += seginfo_a[i].len; + } else { + next = seginfo_a[i].addr + 
seginfo_a[i].len; + } + + if (seginfo_a[i].iova != ODP_SHM_IOVA_INVALID) + support_iova = 1; + + if (seginfo_a[i].pa != ODP_SHM_PA_INVALID) + support_pa = 1; + + sum_len += seginfo_a[i].len; + } + + printf("\n"); + printf("IOVA: %s, PA: %s\n\n", support_iova ? "supported" : "not supported", + support_pa ? "supported" : "not supported"); + + CU_ASSERT(sum_len == size); + + if (num_seg > 1) { + /* all, except the first one */ + odp_shm_segment_info_t seginfo_b[num_seg]; + + memset(seginfo_b, 0xff, num_seg * sizeof(odp_shm_segment_info_t)); + + ret = odp_shm_segment_info(shm, 1, num_seg - 1, &seginfo_b[1]); + CU_ASSERT_FATAL(ret == 0); + + for (i = 1; i < num_seg; i++) { + CU_ASSERT(seginfo_a[i].addr == seginfo_b[i].addr); + CU_ASSERT(seginfo_a[i].iova == seginfo_b[i].iova); + CU_ASSERT(seginfo_a[i].pa == seginfo_b[i].pa); + CU_ASSERT(seginfo_a[i].len == seginfo_b[i].len); + } + } + + CU_ASSERT(odp_shm_free(shm) == 0); +} + static int shmem_check_flag_hp(void) { if (_global_shm_capa.flags & ODP_SHM_HP) @@ -1027,6 +1139,7 @@ static int shm_suite_init(void) odp_testinfo_t shmem_suite[] = { ODP_TEST_INFO(shmem_test_capability), ODP_TEST_INFO(shmem_test_reserve), + ODP_TEST_INFO(shmem_test_info), ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_hp, shmem_check_flag_hp), ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_no_hp, shmem_check_flag_no_hp), ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_proc, shmem_check_flag_proc), diff --git a/test/validation/api/system/system.c b/test/validation/api/system/system.c index b1fcbe3c35..f0cf48738d 100644 --- a/test/validation/api/system/system.c +++ b/test/validation/api/system/system.c @@ -579,6 +579,51 @@ static void system_test_info(void) } } +static void system_test_meminfo(void) +{ + const int32_t max_num = 128; + odp_system_meminfo_t info, info_0; + int32_t ret, ret_0, num, i; + odp_system_memblock_t block[max_num]; + + /* Meminfo without blocks */ + ret_0 = odp_system_meminfo(&info_0, NULL, 0); + CU_ASSERT_FATAL(ret_0 >= 0); + + ret = 
odp_system_meminfo(&info, block, max_num); + CU_ASSERT_FATAL(ret >= 0); + + /* Totals should match independent of per block output */ + CU_ASSERT(ret == ret_0); + CU_ASSERT(info_0.total_mapped == info.total_mapped); + CU_ASSERT(info_0.total_used == info.total_used); + CU_ASSERT(info_0.total_overhead == info.total_overhead); + + CU_ASSERT(info.total_mapped >= info.total_used); + CU_ASSERT(info.total_used >= info.total_overhead); + + num = ret; + if (ret > max_num) + num = max_num; + + printf("\n\n"); + printf("System meminfo contain %i blocks, printing %i blocks:\n", ret, num); + + printf(" %s %-32s %16s %14s %14s %12s\n", "index", "name", "addr", + "used", "overhead", "page_size"); + + for (i = 0; i < num; i++) { + printf(" [%3i] %-32s %16" PRIxPTR " %14" PRIu64 " %14" PRIu64 " %12" PRIu64 "\n", + i, block[i].name, block[i].addr, block[i].used, block[i].overhead, + block[i].page_size); + } + + printf("\n"); + printf("Total mapped: %" PRIu64 "\n", info.total_mapped); + printf("Total used: %" PRIu64 "\n", info.total_used); + printf("Total overhead: %" PRIu64 "\n\n", info.total_overhead); +} + odp_testinfo_t system_suite[] = { ODP_TEST_INFO(test_version_api_str), ODP_TEST_INFO(test_version_str), @@ -609,6 +654,7 @@ odp_testinfo_t system_suite[] = { ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles_long_period, system_check_cycle_counter), ODP_TEST_INFO(system_test_info), + ODP_TEST_INFO(system_test_meminfo), ODP_TEST_INFO(system_test_info_print), ODP_TEST_INFO(system_test_config_print), ODP_TEST_INFO_NULL,