11 #ifndef _RTE_CUCKOO_HASH_H_
12 #define _RTE_CUCKOO_HASH_H_
14 #if defined(RTE_ARCH_X86)
15 #include "rte_cmp_x86.h"
18 #if defined(RTE_ARCH_ARM64)
19 #include "rte_cmp_arm64.h"
23 #if defined(RTE_LIBRTE_HASH_DEBUG)
24 #define RETURN_IF_TRUE(cond, retval) do { \
29 #define RETURN_IF_TRUE(cond, retval)
35 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
40 enum cmp_jump_table_case {
75 enum cmp_jump_table_case {
94 #define RTE_HASH_BUCKET_ENTRIES 8
96 #if !RTE_IS_POWER_OF_2(RTE_HASH_BUCKET_ENTRIES)
97 #error RTE_HASH_BUCKET_ENTRIES must be a power of 2
/** Signature value denoting an empty/unused slot (name implies sentinel;
 * exact usage is in the .c implementation — not visible here). */
#define NULL_SIGNATURE			0

/** Alignment, in bytes, applied to stored keys
 * (presumably on struct rte_hash_key — confirm against its definition). */
#define KEY_ALIGNMENT			16

/** Capacity of the per-core object cache
 * (sizes the objs[] array in struct lcore_cache). */
#define LCORE_CACHE_SIZE		64

/** Upper bound on the BFS queue used when searching for a cuckoo
 * displacement path. */
#define RTE_HASH_BFS_QUEUE_MAX_LEN	1000

/** TSX abort status code signalling that a cuckoo path was invalidated
 * by a concurrent update. (Spelling "INVALIDED" is the established API
 * name — do not "fix" it; callers reference it.) */
#define RTE_XABORT_CUCKOO_PATH_INVALIDED	0x4

/** Maximum number of hardware-transaction retries before falling back. */
#define RTE_HASH_TSX_MAX_RETRY		10
116 void *objs[LCORE_CACHE_SIZE];
120 struct rte_hash_key {
130 enum rte_hash_sig_compare_function {
131 RTE_HASH_COMPARE_SCALAR = 0,
132 RTE_HASH_COMPARE_SSE,
138 uint16_t sig_current[RTE_HASH_BUCKET_ENTRIES];
140 uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES];
142 uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
208 uint32_t cur_bkt_idx;
210 struct queue_node *prev;
uint8_t readwrite_concur_lf_support
enum rte_hash_sig_compare_function sig_cmp_fn
uint8_t readwrite_concur_support
uint32_t key_len __rte_cache_aligned
rte_hash_function hash_func
#define RTE_HASH_NAMESIZE
char name[RTE_HASH_NAMESIZE]
int(* rte_hash_cmp_eq_t)(const void *key1, const void *key2, size_t key_len)
struct rte_ring * free_slots
struct rte_ring * free_ext_bkts
uint32_t(* rte_hash_function)(const void *key, uint32_t key_len, uint32_t init_val)
struct lcore_cache * local_free_slots
rte_rwlock_t * readwrite_lock
uint8_t hw_trans_mem_support
#define __rte_cache_aligned
struct rte_hash_bucket * buckets_ext
rte_hash_cmp_eq_t rte_hash_custom_cmp_eq
uint8_t writer_takes_lock
uint8_t ext_table_support
uint32_t hash_func_init_val
struct rte_hash_bucket * buckets
enum cmp_jump_table_case cmp_jump_table_idx