#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_memcpy.h>
#include <rte_common.h>

#define RTE_MEMPOOL_HEADER_COOKIE1  0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2  0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE  0xadd2e55badbadbadULL /**< Trailer cookie. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
        uint64_t put_bulk;         /**< Number of puts. */
        uint64_t put_objs;         /**< Number of objects successfully put. */
        uint64_t get_success_bulk; /**< Successful allocation number. */
        uint64_t get_success_objs; /**< Objects successfully allocated. */
        uint64_t get_fail_bulk;    /**< Failed allocation number. */
        uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
} __rte_cache_aligned;
#endif
/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
        uint32_t size;        /**< Size of the cache */
        uint32_t flushthresh; /**< Threshold before we flush excess elements */
        uint32_t len;         /**< Current cache count */
        /*
         * Cache is allocated to this size to allow it to overflow in certain
         * cases to avoid needless emptying of cache.
         */
        void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;

/**
 * A structure that stores the size of mempool elements.
 */
struct rte_mempool_objsz {
        uint32_t elt_size;     /**< Size of an element. */
        uint32_t header_size;  /**< Size of header (before elt). */
        uint32_t trailer_size; /**< Size of trailer (after elt). */
        uint32_t total_size;
        /**< Total size of an object (header + elt + trailer). */
};
/** Maximum length of a memory pool's name. */
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
                              sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT  RTE_MEMPOOL_MZ_PREFIX "%s"

#define MEMPOOL_PG_SHIFT_MAX   (sizeof(uintptr_t) * CHAR_BIT - 1)

/** Mempool over one chunk of physically continuous memory */
#define MEMPOOL_PG_NUM_DEFAULT 1

#ifndef RTE_MEMPOOL_ALIGN
#define RTE_MEMPOOL_ALIGN      RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)
/**
 * Mempool object header structure
 *
 * Each object stored in mempools are prefixed by this header structure,
 * it allows to retrieve the mempool pointer from the object and to iterate
 * on all objects attached to a mempool.
 */
struct rte_mempool_objhdr {
        STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
        struct rte_mempool *mp;          /**< The mempool owning the object. */
        RTE_STD_C11
        union {
                rte_iova_t iova;         /**< IO address of the object. */
                phys_addr_t physaddr;    /**< deprecated - Physical address. */
        };
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        uint64_t cookie;                 /**< Debug cookie. */
#endif
};

/**
 * A list of object headers type
 */
STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * Mempool object trailer structure
 *
 * In debug mode, each object stored in mempools are suffixed by this
 * trailer structure containing a cookie preventing memory corruptions.
 */
struct rte_mempool_objtlr {
        uint64_t cookie;                 /**< Debug cookie. */
};
#endif

/**
 * A list of memory where objects are stored
 */
STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);

/**
 * Callback used to free a memory chunk
 */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
        void *opaque);

/**
 * Mempool objects memory header structure
 *
 * The memory chunks where objects are stored. Each chunk is virtually
 * and physically contiguous.
 */
struct rte_mempool_memhdr {
        STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
        struct rte_mempool *mp;  /**< The mempool owning the chunk */
        void *addr;              /**< Virtual address of the chunk */
        RTE_STD_C11
        union {
                rte_iova_t iova;       /**< IO address of the chunk */
                phys_addr_t phys_addr; /**< deprecated - Physical address. */
        };
        size_t len;              /**< length of the chunk */
        rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
        void *opaque;            /**< Argument passed to the free callback */
};
/**
 * The RTE mempool structure.
 */
struct rte_mempool {
        char name[RTE_MEMZONE_NAMESIZE]; /**< Name of mempool. */
        RTE_STD_C11
        union {
                void *pool_data;   /**< Ring or pool to store objects. */
                uint64_t pool_id;  /**< External mempool identifier. */
        };
        void *pool_config;               /**< optional args for ops alloc. */
        const struct rte_memzone *mz;    /**< Memzone where pool is alloc'd. */
        unsigned int flags;              /**< Flags of the mempool. */
        int socket_id;                   /**< Socket id passed at create. */
        uint32_t size;                   /**< Max size of the mempool. */
        uint32_t cache_size;
        /**< Size of per-lcore default local cache. */

        uint32_t elt_size;               /**< Size of an element. */
        uint32_t header_size;            /**< Size of header (before elt). */
        uint32_t trailer_size;           /**< Size of trailer (after elt). */

        unsigned private_data_size;      /**< Size of private data. */
        /**
         * Index into rte_mempool_ops_table array of mempool ops
         * structs, which contain callback function pointers.
         * An index is used here rather than pointers to the callbacks
         * to facilitate any secondary processes that may want to use
         * this mempool.
         */
        int32_t ops_index;

        struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */

        uint32_t populated_size;         /**< Number of populated objects. */
        struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
        uint32_t nb_mem_chunks;          /**< Number of memory chunks */
        struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        /** Per-lcore statistics. */
        struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
}  __rte_cache_aligned;
#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread among memory channels. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
#define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer".*/
#define MEMPOOL_F_POOL_CREATED   0x0010 /**< Internal: pool is created. */
#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */
/**
 * This capability flag is advertised by a mempool handler, if the whole
 * memory area containing the objects must be physically contiguous.
 * Note: This flag should not be passed by application.
 */
#define MEMPOOL_F_CAPA_PHYS_CONTIG 0x0040
/**
 * This capability flag is advertised by a mempool handler. Used for a case
 * where mempool driver wants object start address (vaddr) aligned to block
 * size (/total element size).
 *
 * Note:
 * - This flag should not be passed by application.
 *   Flag used for mempool driver only.
 * - Mempool driver must also set MEMPOOL_F_CAPA_PHYS_CONTIG flag along with
 *   this flag.
 */
#define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
/**
 * @internal When debug is enabled, store some statistics.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the statistics field to increment in the memory pool.
 * @param n
 *   Number to add to the object-oriented statistics.
 */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {                    \
                unsigned __lcore_id = rte_lcore_id();           \
                if (__lcore_id < RTE_MAX_LCORE) {               \
                        mp->stats[__lcore_id].name##_objs += n; \
                        mp->stats[__lcore_id].name##_bulk += 1; \
                }                                               \
        } while(0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
#endif
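/*
 * Illustrative sketch (not part of the original header): the name argument is
 * token-pasted, so a call site such as the one below bumps the "put" counters
 * of the calling lcore (put_objs += n, put_bulk += 1) in a debug build, and
 * compiles to nothing otherwise. The function name is an assumption.
 */
static inline void
example_count_put(struct rte_mempool *mp, unsigned int n)
{
        (void)mp; /* only referenced by the macro in debug builds */
        (void)n;
        __MEMPOOL_STAT_ADD(mp, put, n);
}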
/**
 * Calculate the size of the mempool header.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param cs
 *   Size of the per-lcore cache.
 */
#define MEMPOOL_HEADER_SIZE(mp, cs) \
        (sizeof(*(mp)) + (((cs) == 0) ? 0 : \
        (sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
{
        return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
                sizeof(struct rte_mempool_objhdr));
}

/**
 * Return a pointer to the mempool owning this object.
 *
 * @param obj
 *   An object that is owned by a pool. If this is not the case,
 *   the behavior is undefined.
 * @return
 *   A pointer to the mempool structure.
 */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
        struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
        return hdr->mp;
}

/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
{
        struct rte_mempool *mp = rte_mempool_from_obj(obj);
        return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}
/**
 * @internal Check and update cookies, or panic.
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
        void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_check_cookies(mp, obj_table_const, n, free) \
        rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */

/**
 * Prototype for implementation specific data provisioning function.
 */
typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);

/**
 * Free the opaque private data pointed to by mp->pool_data pointer.
 */
typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);

/**
 * Enqueue an object into the external pool.
 */
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
                void * const *obj_table, unsigned int n);

/**
 * Dequeue an object from the external pool.
 */
typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
                void **obj_table, unsigned int n);

/**
 * Return the number of available objects in the external pool.
 */
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);

/**
 * Get the mempool capabilities.
 */
typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
                unsigned int *flags);

/**
 * Notify new memory area to mempool.
 */
typedef int (*rte_mempool_ops_register_memory_area_t)
(const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len);
/** Structure defining mempool operations structure */
struct rte_mempool_ops {
        char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
        rte_mempool_alloc_t alloc;       /**< Allocate private data. */
        rte_mempool_free_t free;         /**< Free the external pool. */
        rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
        rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
        rte_mempool_get_count get_count; /**< Get qty of available objs. */
        /**
         * Get the mempool capabilities
         */
        rte_mempool_get_capabilities_t get_capabilities;
        /**
         * Notify new memory area to mempool
         */
        rte_mempool_ops_register_memory_area_t register_memory_area;
} __rte_cache_aligned;

#define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */

/**
 * Structure storing the table of registered ops structs, each of which
 * contain the function pointers for the mempool ops functions.
 */
struct rte_mempool_ops_table {
        rte_spinlock_t sl;     /**< Spinlock for add/delete. */
        uint32_t num_ops;      /**< Number of used ops structs in the table. */
        /** Storage for all possible ops structs. */
        struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
} __rte_cache_aligned;

/** Array of registered ops structs. */
extern struct rte_mempool_ops_table rte_mempool_ops_table;
/**
 * @internal Get the mempool ops struct from its index.
 */
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
{
        RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));

        return &rte_mempool_ops_table.ops[ops_index];
}
/**
 * @internal Wrapper for mempool_ops dequeue callback.
 *
 * @return
 *   - 0: Success; got n objects.
 *   - <0: Error; code of dequeue function.
 */
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
                void **obj_table, unsigned n)
{
        struct rte_mempool_ops *ops;

        ops = rte_mempool_get_ops(mp->ops_index);
        return ops->dequeue(mp, obj_table, n);
}
/**
 * @internal Wrapper for mempool_ops enqueue callback.
 *
 * @return
 *   - 0: Success; n objects supplied.
 *   - <0: Error; code of enqueue function.
 */
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
                unsigned n)
{
        struct rte_mempool_ops *ops;

        ops = rte_mempool_get_ops(mp->ops_index);
        return ops->enqueue(mp, obj_table, n);
}
/**
 * @internal Wrapper for mempool_ops get_count callback.
 */
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);

/**
 * @internal Wrapper for mempool_ops get_capabilities callback.
 */
int
rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
                unsigned int *flags);

/**
 * @internal Wrapper for mempool_ops register_memory_area callback.
 */
int
rte_mempool_ops_register_memory_area(const struct rte_mempool *mp,
                char *vaddr, rte_iova_t iova, size_t len);
/**
 * Set the ops of a mempool.
 *
 * This can only be done on a mempool that is not populated, i.e. just after
 * a call to rte_mempool_create_empty().
 */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
                void *pool_config);

/**
 * Register mempool operations.
 *
 * @return
 *   - >=0: Success; return the index of the ops struct in the table.
 *   - -EINVAL: some missing callbacks while registering ops struct.
 *   - -ENOSPC: the maximum number of ops structs has been reached.
 */
int rte_mempool_register_ops(const struct rte_mempool_ops *ops);

/**
 * Macro to statically register the ops of a mempool handler.
 */
#define MEMPOOL_REGISTER_OPS(ops)                                       \
        void mp_hdlr_init_##ops(void);                                  \
        void __attribute__((constructor, used)) mp_hdlr_init_##ops(void)\
        {                                                               \
                rte_mempool_register_ops(&ops);                         \
        }
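/*
 * Illustrative sketch (not part of the original header): a skeleton mempool
 * handler registered with MEMPOOL_REGISTER_OPS(). The handler name
 * "example_stack" and the (deliberately empty) callback bodies are
 * assumptions for the example; a real handler must allocate a backing store
 * in alloc and actually move the object pointers in enqueue/dequeue.
 */
static int
example_ops_alloc(struct rte_mempool *mp)
{
        mp->pool_data = NULL; /* a real handler allocates its store here */
        return 0;
}

static int
example_ops_enqueue(struct rte_mempool *mp, void * const *obj_table,
                unsigned int n)
{
        (void)mp; (void)obj_table; (void)n;
        return 0; /* a real handler stores the n pointers */
}

static int
example_ops_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
        (void)mp; (void)obj_table; (void)n;
        return -ENOBUFS; /* a real handler returns n pointers, or <0 if empty */
}

static unsigned
example_ops_get_count(const struct rte_mempool *mp)
{
        (void)mp;
        return 0; /* a real handler reports the objects it holds */
}

static const struct rte_mempool_ops example_ops = {
        .name = "example_stack",
        .alloc = example_ops_alloc,
        .enqueue = example_ops_enqueue,
        .dequeue = example_ops_dequeue,
        .get_count = example_ops_get_count,
};

MEMPOOL_REGISTER_OPS(example_ops);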
/**
 * An object callback function for mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
                void *opaque, void *obj, unsigned obj_idx);

/**
 * A memory callback function for mempool.
 *
 * Used by rte_mempool_mem_iter().
 */
typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
                void *opaque, struct rte_mempool_memhdr *memhdr,
                unsigned mem_idx);

/**
 * A mempool constructor callback function.
 *
 * Arguments are the mempool and the opaque pointer given by the user in
 * rte_mempool_create().
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
/**
 * Create a new mempool named *name* in memory.
 *
 * This function uses rte_memzone_reserve() to allocate memory. The
 * pool contains n elements of elt_size. Its size is set to n.
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
                unsigned cache_size, unsigned private_data_size,
                rte_mempool_ctor_t *mp_init, void *mp_init_arg,
                rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
                int socket_id, unsigned flags);

/**
 * Create a new mempool named *name* in memory, on externally allocated
 * memory. The IO addresses of the pages are described by iova[],
 * pg_num and pg_shift.
 */
struct rte_mempool *
rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
                unsigned cache_size, unsigned private_data_size,
                rte_mempool_ctor_t *mp_init, void *mp_init_arg,
                rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
                int socket_id, unsigned flags, void *vaddr,
                const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift);
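/*
 * Illustrative sketch (not part of the original header): creating a pool
 * whose default put/get paths use the lock-free single-producer and
 * single-consumer variants via MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET. The
 * pool name and sizes are assumptions for the example.
 */
static inline struct rte_mempool *
example_create_spsc_pool(void)
{
        /* 8191 (2^13 - 1) elements of 2 KB, 256-object per-lcore cache */
        return rte_mempool_create("example_spsc", 8191, 2048, 256, 0,
                                  NULL, NULL, NULL, NULL, SOCKET_ID_ANY,
                                  MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
}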
/**
 * Create an empty mempool
 *
 * The mempool is allocated and initialized, but it is not populated: no
 * memory is allocated for the mempool elements. The user has to call
 * rte_mempool_populate_*() to add memory chunks to the pool. Once
 * populated, the user may also want to initialize each object with
 * rte_mempool_obj_iter().
 */
struct rte_mempool *
rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
        unsigned cache_size, unsigned private_data_size,
        int socket_id, unsigned flags);

/**
 * Free a mempool
 *
 * Unlink the mempool from the global list, free the memory chunks, and all
 * memory referenced by the mempool. The objects must not be used by other
 * cores as they will be freed.
 */
void
rte_mempool_free(struct rte_mempool *mp);

/**
 * Add physically contiguous memory for objects in the pool at init
 *
 * Add a virtually and physically contiguous memory chunk in the pool
 * where objects can be instantiated.
 */
int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
        rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
        void *opaque);

int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
        phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
        void *opaque);

/**
 * Add physical memory for objects in the pool at init
 *
 * Add a virtually contiguous memory chunk in the pool where objects can
 * be instantiated. The IO addresses corresponding to the virtual area
 * are described in iova[], pg_num, pg_shift.
 */
int rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
        const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift,
        rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);

int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
        const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
        rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);

/**
 * Add virtually contiguous memory for objects in the pool at init
 */
int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
        size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
        void *opaque);

/**
 * Add memory for objects in the pool at init
 *
 * This is the default function used by rte_mempool_create() to populate
 * the mempool. It adds memory allocated using rte_memzone_reserve().
 */
int rte_mempool_populate_default(struct rte_mempool *mp);

/**
 * Add memory from anonymous mapping for objects in the pool at init
 */
int rte_mempool_populate_anon(struct rte_mempool *mp);
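/*
 * Illustrative sketch (not part of the original header): the two-step
 * creation flow that the populate API enables. The pool name, the sizes
 * and the "ring_mp_mc" handler choice are assumptions for the example.
 */
static inline struct rte_mempool *
example_create_and_populate(void)
{
        struct rte_mempool *mp;

        mp = rte_mempool_create_empty("example_pool", 8191, 2048, 256, 0,
                                      SOCKET_ID_ANY, 0);
        if (mp == NULL)
                return NULL;

        /* pick a handler, then let the library reserve and add the memory */
        if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) < 0 ||
            rte_mempool_populate_default(mp) < 0) {
                rte_mempool_free(mp);
                return NULL;
        }

        return mp;
}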
/**
 * Call a function for each mempool element
 *
 * Iterate across all objects attached to a rte_mempool and call the
 * callback function on it.
 *
 * @return
 *   Number of objects iterated.
 */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
        rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Call a function for each mempool memory chunk
 *
 * @return
 *   Number of memory chunks iterated.
 */
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
        rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);

/**
 * Dump the status of the mempool to a file.
 */
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);

/**
 * Create a user-owned mempool cache.
 *
 * This can be used by non-EAL threads to enable caching when they
 * interact with a mempool.
 */
struct rte_mempool_cache *
rte_mempool_cache_create(uint32_t size, int socket_id);

/**
 * Free a user-owned mempool cache.
 */
void
rte_mempool_cache_free(struct rte_mempool_cache *cache);

/**
 * Flush a user-owned mempool cache to the specified mempool.
 */
static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
                        struct rte_mempool *mp)
{
        rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
        cache->len = 0;
}

/**
 * Get a pointer to the per-lcore default mempool cache.
 *
 * @return
 *   A pointer to the mempool cache or NULL if disabled or non-EAL thread.
 */
static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
        if (mp->cache_size == 0)
                return NULL;

        if (lcore_id >= RTE_MAX_LCORE)
                return NULL;

        return &mp->local_cache[lcore_id];
}
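/*
 * Illustrative sketch (not part of the original header): flushing the
 * calling lcore's default cache back to the common pool, e.g. before the
 * lcore goes idle, so the cached objects become visible to other lcores.
 * The function name is an assumption.
 */
static inline void
example_flush_default_cache(struct rte_mempool *mp)
{
        struct rte_mempool_cache *cache;

        cache = rte_mempool_default_cache(mp, rte_lcore_id());
        if (cache != NULL)
                rte_mempool_cache_flush(cache, mp);
}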
/**
 * @internal Put several objects back in the mempool; used internally.
 *
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
 */
static __rte_always_inline void
__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
                      unsigned int n, struct rte_mempool_cache *cache)
{
        void **cache_objs;

        /* increment stat now, adding in mempool always success */
        __MEMPOOL_STAT_ADD(mp, put, n);

        /* No cache provided or if put would overflow mem allocated for cache */
        if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
                goto ring_enqueue;

        cache_objs = &cache->objs[cache->len];

        /*
         * The cache follows the following algorithm
         *   1. Add the objects to the cache
         *   2. Anything greater than the cache min value (if it crosses the
         *   cache flush threshold) is flushed to the ring.
         */

        /* Add elements back into the cache */
        rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);

        cache->len += n;

        if (cache->len >= cache->flushthresh) {
                rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
                                cache->len - cache->size);
                cache->len = cache->size;
        }

        return;

ring_enqueue:

        /* push remaining objects in ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
                rte_panic("cannot put objects in mempool\n");
#else
        rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
#endif
}
/**
 * Put several objects back in the mempool.
 *
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
 */
static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
                        unsigned int n, struct rte_mempool_cache *cache)
{
        __mempool_check_cookies(mp, obj_table, n, 0);
        __mempool_generic_put(mp, obj_table, n, cache);
}

/**
 * Put several objects back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 */
static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
                     unsigned int n)
{
        struct rte_mempool_cache *cache;
        cache = rte_mempool_default_cache(mp, rte_lcore_id());
        rte_mempool_generic_put(mp, obj_table, n, cache);
}

/**
 * Put one object back in the mempool.
 */
static __rte_always_inline void
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
        rte_mempool_put_bulk(mp, &obj, 1);
}
/**
 * @internal Get several objects from the mempool; used internally.
 *
 * @return
 *   - 0: Success; got n objects.
 *   - <0: Error; code of ring dequeue function.
 */
static __rte_always_inline int
__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
                      unsigned int n, struct rte_mempool_cache *cache)
{
        int ret;
        uint32_t index, len;
        void **cache_objs;

        /* No cache provided or cannot be satisfied from cache */
        if (unlikely(cache == NULL || n >= cache->size))
                goto ring_dequeue;

        cache_objs = cache->objs;

        /* Can this be satisfied from the cache? */
        if (cache->len < n) {
                /* No. Backfill the cache first, and then fill from it */
                uint32_t req = n + (cache->size - cache->len);

                /* How many do we require i.e. number to fill the cache + the request */
                ret = rte_mempool_ops_dequeue_bulk(mp,
                        &cache->objs[cache->len], req);
                if (unlikely(ret < 0)) {
                        /*
                         * In the off chance that we are buffer constrained,
                         * where we are not able to allocate cache + n, go to
                         * the ring directly. If that fails, we are truly out
                         * of buffers.
                         */
                        goto ring_dequeue;
                }

                cache->len += req;
        }

        /* Now fill in the response ... */
        for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
                *obj_table = cache_objs[len];

        cache->len -= n;

        __MEMPOOL_STAT_ADD(mp, get_success, n);

        return 0;

ring_dequeue:

        /* get remaining objects from ring */
        ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);

        if (ret < 0)
                __MEMPOOL_STAT_ADD(mp, get_fail, n);
        else
                __MEMPOOL_STAT_ADD(mp, get_success, n);

        return ret;
}
/**
 * Get several objects from the mempool.
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
                        unsigned int n, struct rte_mempool_cache *cache)
{
        int ret;
        ret = __mempool_generic_get(mp, obj_table, n, cache);
        if (ret == 0)
                __mempool_check_cookies(mp, obj_table, n, 1);
        return ret;
}

/**
 * Get several objects from the mempool.
 *
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behaviour that was specified at
 * mempool creation time (see flags).
 */
static __rte_always_inline int
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
        struct rte_mempool_cache *cache;
        cache = rte_mempool_default_cache(mp, rte_lcore_id());
        return rte_mempool_generic_get(mp, obj_table, n, cache);
}

/**
 * Get one object from the mempool.
 */
static __rte_always_inline int
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
        return rte_mempool_get_bulk(mp, obj_p, 1);
}
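/*
 * Illustrative sketch (not part of the original header): a burst round trip
 * through a user-owned cache, as a non-EAL thread would do it. The cache
 * size and burst size are assumptions for the example.
 */
static inline void
example_user_cache_round_trip(struct rte_mempool *mp)
{
        struct rte_mempool_cache *cache;
        void *burst[32];

        cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
        if (cache == NULL)
                return;

        if (rte_mempool_generic_get(mp, burst, 32, cache) == 0)
                rte_mempool_generic_put(mp, burst, 32, cache);

        /* return any objects left in the cache before freeing it */
        rte_mempool_cache_flush(cache, mp);
        rte_mempool_cache_free(cache);
}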
/**
 * Calculate the size of the mempool object (header + elt + trailer).
 *
 * @return
 *   Total size of the mempool object.
 */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
        struct rte_mempool_objsz *sz);

/**
 * Get the size of memory required to store mempool elements.
 *
 * Calculate the maximum amount of memory required to store given number
 * of objects. Assume that the memory buffer will be aligned at page
 * boundary.
 */
size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
        uint32_t pg_shift, unsigned int flags);

/**
 * Get the size of memory required to store mempool elements.
 *
 * Calculate how much memory would be actually required with the given
 * memory footprint to store required number of objects.
 *
 * @return
 *   On success, the number of bytes needed to store given number of
 *   objects, aligned to the given page size. If the provided memory
 *   buffer is too small, return a negative value whose absolute value
 *   is the actual number of elements that can be stored in that buffer.
 */
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
        size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
        uint32_t pg_shift, unsigned int flags);
/**
 * Return the number of entries in the mempool.
 *
 * When cache is enabled, this function has to browse the length of
 * all lcores, so it should not be used in a data path, but only for
 * debug purposes.
 */
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);

/**
 * Return the number of elements which have been allocated from the mempool.
 */
unsigned int rte_mempool_in_use_count(const struct rte_mempool *mp);

/**
 * Test if the mempool is full.
 *
 * @return
 *   - 1: The mempool is full.
 *   - 0: The mempool is not full.
 */
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
        return !!(rte_mempool_avail_count(mp) == mp->size);
}

/**
 * Test if the mempool is empty.
 *
 * @return
 *   - 1: The mempool is empty.
 *   - 0: The mempool is not empty.
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
        return !!(rte_mempool_avail_count(mp) == 0);
}

/**
 * Return the IO address of elt, which is an element of the pool mp.
 */
static inline rte_iova_t
rte_mempool_virt2iova(const void *elt)
{
        const struct rte_mempool_objhdr *hdr;
        hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
                sizeof(*hdr));
        return hdr->iova;
}

/**
 * Return a pointer to the private data in an mempool structure.
 */
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
        return (char *)mp +
                MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
}

/**
 * Check the consistency of mempool objects.
 *
 * Verify the coherency of fields in the mempool structure. Also check that
 * the cookies of mempool objects (even the ones that are not present in
 * pool) have a correct value. If not, a panic will occur.
 */
void rte_mempool_audit(struct rte_mempool *mp);

/**
 * Search a mempool from its name.
 *
 * @return
 *   The pointer to the mempool matching the name, or NULL if not found.
 */
struct rte_mempool *rte_mempool_lookup(const char *name);

/**
 * Dump the status of all mempools on the console.
 */
void rte_mempool_list_dump(FILE *f);

/**
 * Walk list of all memory pools.
 */
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
                      void *arg);
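/*
 * Illustrative sketch (not part of the original header): a single-object
 * get/put round trip through the default cache, with the accounting
 * invariant implied by the avail/in-use counters above. The function name
 * is an assumption.
 */
static inline void
example_get_put(struct rte_mempool *mp)
{
        void *obj;

        if (rte_mempool_get(mp, &obj) < 0)
                return; /* pool is empty: -ENOENT */

        /* ... use the object ... */

        rte_mempool_put(mp, obj);

        /* available + in-use always equals the pool size */
        RTE_ASSERT(rte_mempool_avail_count(mp) +
                   rte_mempool_in_use_count(mp) == mp->size);
}

#endif /* _RTE_MEMPOOL_H_ */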