DPDK 18.11.0
rte_mbuf.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#ifndef _RTE_MBUF_H_
#define _RTE_MBUF_H_

#include <stdint.h>
#include <rte_compat.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_mempool.h>
#include <rte_memory.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf_ptype.h>

#ifdef __cplusplus
extern "C" {
#endif
/*
 * Packet Offload Features Flags. They also carry packet type information.
 * These are critical resources: RX and TX share these bits, so be cautious
 * about any change.
 *
 * - RX flags start at bit position zero, and new flags get added to the left
 *   of the previous flags.
 * - The most-significant 3 bits are reserved for generic mbuf flags.
 * - TX flags therefore start at bit position 60 (i.e. 63 - 3), and new flags
 *   get added to the right of the previously defined flags, i.e. they should
 *   count downwards, not upwards.
 *
 * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
 * rte_get_tx_ol_flag_name().
 */

#define PKT_RX_VLAN          (1ULL << 0)  /**< RX packet is a 802.1q VLAN packet. */

#define PKT_RX_RSS_HASH      (1ULL << 1)  /**< RX tag exist with RSS hash result. */
#define PKT_RX_FDIR          (1ULL << 2)  /**< RX packet with FDIR match indicate. */
/** Deprecated: check the 2 bits of PKT_RX_L4_CKSUM_MASK instead. */
#define PKT_RX_L4_CKSUM_BAD  (1ULL << 3)

/** Deprecated: check the 2 bits of PKT_RX_IP_CKSUM_MASK instead. */
#define PKT_RX_IP_CKSUM_BAD  (1ULL << 4)

#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)  /**< External IP header checksum error. */
/** A VLAN has been stripped by the hardware and its TCI is saved in
 * mbuf->vlan_tci. This can only happen if VLAN stripping is enabled in the
 * RX configuration of the PMD. */
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)

/**
 * Mask of bits used to determine the status of the RX IP checksum:
 * UNKNOWN / BAD / GOOD / NONE (checksum not correct in the packet data,
 * but the integrity of the IP header was verified).
 */
#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))

#define PKT_RX_IP_CKSUM_UNKNOWN 0
#define PKT_RX_IP_CKSUM_BAD     (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD    (1ULL << 7)
#define PKT_RX_IP_CKSUM_NONE    ((1ULL << 4) | (1ULL << 7))

/**
 * Mask of bits used to determine the status of the RX L4 checksum:
 * UNKNOWN / BAD / GOOD / NONE (checksum not correct in the packet data,
 * but the integrity of the L4 data was verified).
 */
#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))

#define PKT_RX_L4_CKSUM_UNKNOWN 0
#define PKT_RX_L4_CKSUM_BAD     (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD    (1ULL << 8)
#define PKT_RX_L4_CKSUM_NONE    ((1ULL << 3) | (1ULL << 8))
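
/**
 * Editor's illustrative sketch (not part of the original header): the IP and
 * L4 checksum statuses are 2-bit fields, so they must be read by comparing
 * against the mask, not by testing a single bit. drop() and
 * verify_l4_cksum_in_sw() are hypothetical helpers.
 *
 * @code
 * uint64_t csum = m->ol_flags & PKT_RX_L4_CKSUM_MASK;
 * if (csum == PKT_RX_L4_CKSUM_BAD)
 *	drop(m);
 * else if (csum != PKT_RX_L4_CKSUM_GOOD)
 *	verify_l4_cksum_in_sw(m);
 * @endcode
 */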

#define PKT_RX_IEEE1588_PTP  (1ULL << 9)  /**< RX IEEE1588 L2 Ethernet PT Packet. */
#define PKT_RX_IEEE1588_TMST (1ULL << 10) /**< RX IEEE1588 L2/L4 timestamped packet. */
#define PKT_RX_FDIR_ID       (1ULL << 13) /**< FD id reported if FDIR match. */
#define PKT_RX_FDIR_FLX      (1ULL << 14) /**< Flexible bytes reported if FDIR match. */
/** The outer VLAN has been stripped by the hardware and its TCI is saved in
 * mbuf->vlan_tci_outer. */
#define PKT_RX_QINQ_STRIPPED (1ULL << 15)

/** When packets are coalesced by hardware or a virtual driver, this flag can
 * be set in the RX mbuf, meaning that m->tso_segsz is valid and is set to the
 * segment size of the original packets. */
#define PKT_RX_LRO           (1ULL << 16)

/** Indicates that the timestamp field in the mbuf is valid. */
#define PKT_RX_TIMESTAMP     (1ULL << 17)

/** Indicates that security offload processing was applied on the RX packet. */
#define PKT_RX_SEC_OFFLOAD   (1ULL << 18)

/** Indicates that security offload processing failed on the RX packet. */
#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)

/** The RX packet is a double VLAN, and the outer TCI has been saved in
 * mbuf->vlan_tci_outer. */
#define PKT_RX_QINQ          (1ULL << 20)

/**
 * Mask of bits used to determine the status of the outer RX L4 checksum:
 * UNKNOWN / BAD / GOOD / INVALID (bad outer L4 checksum or invalid outer L4
 * data).
 */
#define PKT_RX_OUTER_L4_CKSUM_MASK	((1ULL << 21) | (1ULL << 22))

#define PKT_RX_OUTER_L4_CKSUM_UNKNOWN	0
#define PKT_RX_OUTER_L4_CKSUM_BAD	(1ULL << 21)
#define PKT_RX_OUTER_L4_CKSUM_GOOD	(1ULL << 22)
#define PKT_RX_OUTER_L4_CKSUM_INVALID	((1ULL << 21) | (1ULL << 22))

/* add new RX flags here */

/* add new TX flags here */

/** This flag indicates that the metadata field in the mbuf is in use. */
#define PKT_TX_METADATA	(1ULL << 40)

/** Outer UDP checksum offload flag. To use it, the application must set
 * outer_l2_len and outer_l3_len in the mbuf. */
#define PKT_TX_OUTER_UDP_CKSUM	(1ULL << 41)

/** UDP Fragmentation Offload flag. This flag must be set by the application
 * when using UDP GSO. */
#define PKT_TX_UDP_SEG	(1ULL << 42)

/** Request security offload processing on the TX packet. */
#define PKT_TX_SEC_OFFLOAD	(1ULL << 43)

/** Offload MACsec. This flag must be set by the application to enable this
 * offload feature for a packet to be transmitted. */
#define PKT_TX_MACSEC	(1ULL << 44)

/**
 * Bits 45:48 are used for the tunnel type. The tunnel type must be specified
 * for TSO or checksum on the inner part of tunnel packets.
 */
#define PKT_TX_TUNNEL_VXLAN     (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE       (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP      (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE    (0x4ULL << 45)
/** TX packet with MPLS-in-UDP RFC 7510 header. */
#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
#define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)

/** Generic IP encapsulated tunnel type, used for TSO and checksum offload. */
#define PKT_TX_TUNNEL_IP   (0xDULL << 45)

/** Generic UDP encapsulated tunnel type, used for TSO and checksum offload. */
#define PKT_TX_TUNNEL_UDP  (0xEULL << 45)
/* add new TX TUNNEL type here */
#define PKT_TX_TUNNEL_MASK (0xFULL << 45)
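
/**
 * Editor's illustrative sketch: the tunnel type occupies bits 45:48, so at
 * most one PKT_TX_TUNNEL_* value may be set, and it must be read back through
 * the mask. prepare_vxlan_offload() is a hypothetical helper.
 *
 * @code
 * m->ol_flags |= PKT_TX_TUNNEL_VXLAN;
 * ...
 * if ((m->ol_flags & PKT_TX_TUNNEL_MASK) == PKT_TX_TUNNEL_VXLAN)
 *	prepare_vxlan_offload(m);
 * @endcode
 */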

/** TX packet with double VLAN inserted. */
#define PKT_TX_QINQ        (1ULL << 49)
/* this old name is deprecated */
#define PKT_TX_QINQ_PKT    PKT_TX_QINQ

/** TCP segmentation offload. Implies PKT_TX_TCP_CKSUM; the application must
 * also set PKT_TX_IPV4 or PKT_TX_IPV6 (plus PKT_TX_IP_CKSUM for IPv4) and
 * fill l2_len, l3_len, l4_len and tso_segsz in the mbuf. */
#define PKT_TX_TCP_SEG       (1ULL << 50)

#define PKT_TX_IEEE1588_TMST (1ULL << 51) /**< TX IEEE1588 packet to timestamp. */

/** Bits 52+53 carry the L4 checksum request: fill l2_len and l3_len in the
 * mbuf, set one of the flags below, and set PKT_TX_IPV4 or PKT_TX_IPV6. */
#define PKT_TX_L4_NO_CKSUM   (0ULL << 52) /**< Disable L4 cksum of TX pkt. */
#define PKT_TX_TCP_CKSUM     (1ULL << 52) /**< TCP cksum computed by NIC. */
#define PKT_TX_SCTP_CKSUM    (2ULL << 52) /**< SCTP cksum computed by NIC. */
#define PKT_TX_UDP_CKSUM     (3ULL << 52) /**< UDP cksum computed by NIC. */
#define PKT_TX_L4_MASK       (3ULL << 52) /**< Mask for L4 cksum offload request. */

/** Offload the IP checksum in the hardware; PKT_TX_IPV4 should also be set. */
#define PKT_TX_IP_CKSUM      (1ULL << 54)

/** Packet is IPv4; required by any offload feature (TSO, L3/L4 checksum).
 * For tunneled packets it refers to the inner headers. */
#define PKT_TX_IPV4          (1ULL << 55)

/** Packet is IPv6; required by any offload feature (TSO, L4 checksum).
 * For tunneled packets it refers to the inner headers. */
#define PKT_TX_IPV6          (1ULL << 56)

/** TX packet is a 802.1q VLAN packet. */
#define PKT_TX_VLAN          (1ULL << 57)
/* this old name is deprecated */
#define PKT_TX_VLAN_PKT      PKT_TX_VLAN

/** Offload the IP checksum of an external header in the hardware;
 * PKT_TX_OUTER_IPV4 should also be set. */
#define PKT_TX_OUTER_IP_CKSUM   (1ULL << 58)

/** Packet outer header is IPv4; required by any outer offload feature. */
#define PKT_TX_OUTER_IPV4    (1ULL << 59)

/** Packet outer header is IPv6; required by any outer offload feature. */
#define PKT_TX_OUTER_IPV6    (1ULL << 60)

/** Bitmask of all supported packet TX offload feature flags. */
#define PKT_TX_OFFLOAD_MASK (		 \
		PKT_TX_OUTER_IPV6 |	 \
		PKT_TX_OUTER_IPV4 |	 \
		PKT_TX_OUTER_IP_CKSUM |  \
		PKT_TX_VLAN_PKT |        \
		PKT_TX_IPV6 |		 \
		PKT_TX_IPV4 |		 \
		PKT_TX_IP_CKSUM |        \
		PKT_TX_L4_MASK |         \
		PKT_TX_IEEE1588_TMST |	 \
		PKT_TX_TCP_SEG |         \
		PKT_TX_QINQ_PKT |        \
		PKT_TX_TUNNEL_MASK |	 \
		PKT_TX_MACSEC |		 \
		PKT_TX_SEC_OFFLOAD |	 \
		PKT_TX_UDP_SEG |	 \
		PKT_TX_OUTER_UDP_CKSUM | \
		PKT_TX_METADATA)

/** Mbuf having an external buffer attached; shinfo in the mbuf must be filled. */
#define EXT_ATTACHED_MBUF    (1ULL << 61)

#define IND_ATTACHED_MBUF    (1ULL << 62) /**< Indirect attached mbuf */

/** Alignment constraint of mbuf private area. */
#define RTE_MBUF_PRIV_ALIGN 8

/** Get the name of a RX offload flag. */
const char *rte_get_rx_ol_flag_name(uint64_t mask);

/** Dump the list of RX offload flags in a buffer. */
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

/** Get the name of a TX offload flag. */
const char *rte_get_tx_ol_flag_name(uint64_t mask);

/** Dump the list of TX offload flags in a buffer. */
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

/** Some NICs need at least a 2KB buffer to RX a standard Ethernet frame
 * without splitting it into multiple segments, so the recommended minimal
 * buffer length is 2KB + RTE_PKTMBUF_HEADROOM. */
#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
	(RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)

/* define a set of marker types that can be used to refer to set points in
 * the mbuf.
 */
__extension__
typedef void    *MARKER[0];   /**< generic marker for a point in a structure */
__extension__
typedef uint8_t  MARKER8[0];  /**< generic marker with 1B alignment */
__extension__
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
                               * with a single assignment */

/**
 * The generic rte_mbuf, containing a packet mbuf.
 */
struct rte_mbuf {
	MARKER cacheline0;

	void *buf_addr;           /**< Virtual address of segment buffer. */
	/**
	 * Physical address of segment buffer.
	 * Forced to 8-byte alignment so the mbuf cacheline0 layout is the
	 * same on 32-bit and 64-bit, which helps vector drivers.
	 */
	RTE_STD_C11
	union {
		rte_iova_t buf_iova;
		rte_iova_t buf_physaddr; /**< deprecated */
	} __rte_aligned(sizeof(rte_iova_t));

	/* next 8 bytes are initialised on RX descriptor rearm */
	MARKER64 rearm_data;
	uint16_t data_off;

	/**
	 * Reference counter. Should only be accessed using
	 * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read() and
	 * rte_mbuf_refcnt_set(); whether those are atomic is controlled
	 * by the CONFIG_RTE_MBUF_REFCNT_ATOMIC config option.
	 */
	RTE_STD_C11
	union {
		rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
		uint16_t refcnt;              /**< Non-atomically accessed refcnt */
	};
	uint16_t nb_segs;         /**< Number of segments. */

	/** Input port (16 bits to support more than 256 virtual ports). */
	uint16_t port;

	uint64_t ol_flags;        /**< Offload features. */

	/* remaining bytes are set on RX when pulling packet from descriptor */
	MARKER rx_descriptor_fields1;

	/*
	 * The packet type, which is the combination of outer/inner L2, L3, L4
	 * and tunnel types. The packet_type is about data really present in the
	 * mbuf. Example: if vlan stripping is enabled, a received vlan packet
	 * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
	 * vlan is stripped from the data.
	 */
	RTE_STD_C11
	union {
		uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
		struct {
			uint32_t l2_type:4;  /**< (Outer) L2 type. */
			uint32_t l3_type:4;  /**< (Outer) L3 type. */
			uint32_t l4_type:4;  /**< (Outer) L4 type. */
			uint32_t tun_type:4; /**< Tunnel type. */
			RTE_STD_C11
			union {
				/** ESP next protocol type, valid if
				 * RTE_PTYPE_TUNNEL_ESP tunnel type is set
				 * on both Tx and Rx. */
				uint8_t inner_esp_next_proto;
				__extension__
				struct {
					uint8_t inner_l2_type:4; /**< Inner L2 type. */
					uint8_t inner_l3_type:4; /**< Inner L3 type. */
				};
			};
			uint32_t inner_l4_type:4; /**< Inner L4 type. */
		};
	};

	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
	uint16_t data_len;        /**< Amount of data in segment buffer. */
	/** VLAN TCI (CPU order), valid if PKT_RX_VLAN is set. */
	uint16_t vlan_tci;

	RTE_STD_C11
	union {
		union {
			uint32_t rss;     /**< RSS hash result if RSS enabled */
			struct {
				union {
					struct {
						uint16_t hash;
						uint16_t id;
					};
					uint32_t lo;
					/**< Second 4 flexible bytes */
				};
				uint32_t hi;
				/**< First 4 flexible bytes or FD ID, dependent
				 * on PKT_RX_FDIR_* flag in ol_flags. */
			} fdir;           /**< Filter identifier if FDIR enabled */
			struct {
				uint32_t lo;
				uint32_t hi;
			} sched;          /**< Hierarchical scheduler */
			/** User defined tags. See rte_distributor_process() */
			uint32_t usr;
		} hash;                   /**< hash information */
		struct {
			/**
			 * Application specific metadata value for egress flow
			 * rule match. Valid if PKT_TX_METADATA is set.
			 * Located here to allow conjunct use with
			 * hash.sched.hi.
			 */
			uint32_t tx_metadata;
			uint32_t reserved;
		};
	};

	/** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ is set. */
	uint16_t vlan_tci_outer;

	uint16_t buf_len;         /**< Length of segment buffer. */

	/** Valid if PKT_RX_TIMESTAMP is set. The unit and time reference
	 * are not normalized but are always the same for a given port. */
	uint64_t timestamp;

	/* second cache line - fields only used in slow path or on TX */
	MARKER cacheline1 __rte_cache_min_aligned;

	RTE_STD_C11
	union {
		void *userdata;   /**< Can be used for external metadata */
		uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
	};

	struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
	struct rte_mbuf *next;    /**< Next segment of scattered packet. */

	/* fields to support TX offloads */
	RTE_STD_C11
	union {
		uint64_t tx_offload;       /**< combined for easy fetch */
		__extension__
		struct {
			uint64_t l2_len:7;     /**< L2 (MAC) header length. */
			uint64_t l3_len:9;     /**< L3 (IP) header length. */
			uint64_t l4_len:8;     /**< L4 (TCP/UDP) header length. */
			uint64_t tso_segsz:16; /**< TCP TSO segment size */

			/* fields for TX offloading of tunnels */
			uint64_t outer_l3_len:9; /**< Outer L3 (IP) header length. */
			uint64_t outer_l2_len:7; /**< Outer L2 (MAC) header length. */

			/* uint64_t unused:8; */
		};
	};

	/** Size of the application private data. In case of an indirect
	 * mbuf, it stores the direct mbuf private data size. */
	uint16_t priv_size;

	/** Timesync flags for use with IEEE1588. */
	uint16_t timesync;

	/** Sequence number. See also rte_reorder_insert(). */
	uint32_t seqn;

	/** Shared data for external buffer attached to mbuf. See
	 * rte_pktmbuf_attach_extbuf(). */
	struct rte_mbuf_ext_shared_info *shinfo;

} __rte_cache_aligned;
/** Function typedef of callback to free externally attached buffer. */
typedef void (*rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque);

/** Shared data at the end of an external buffer. */
struct rte_mbuf_ext_shared_info {
	rte_mbuf_extbuf_free_callback_t free_cb; /**< Free callback function */
	void *fcb_opaque;                        /**< Free callback argument */
	rte_atomic16_t refcnt_atomic;            /**< Atomically accessed refcnt */
};

/** Maximum number of nb_segs allowed. */
#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX

/** Prefetch the first part of the mbuf (fields used early in the RX path). */
static inline void
rte_mbuf_prefetch_part1(struct rte_mbuf *m)
{
	rte_prefetch0(&m->cacheline0);
}

/** Prefetch the second part of the mbuf (fields used in the TX and slow
 * paths). Does nothing if the architecture cache line is bigger than 64B,
 * as the whole mbuf is then expected to fit in one cache line. */
static inline void
rte_mbuf_prefetch_part2(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
	rte_prefetch0(&m->cacheline1);
#else
	RTE_SET_USED(m);
#endif
}
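
/**
 * Editor's illustrative sketch: a typical RX loop prefetches the next mbuf
 * while processing the current one. Assumes rte_eth_rx_burst() from
 * <rte_ethdev.h> and a hypothetical process() helper.
 *
 * @code
 * uint16_t i, nb = rte_eth_rx_burst(port_id, queue_id, pkts, BURST);
 * for (i = 0; i < nb; i++) {
 *	if (i + 1 < nb)
 *		rte_mbuf_prefetch_part1(pkts[i + 1]);
 *	process(pkts[i]);
 * }
 * @endcode
 */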

static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

/** Return the IO address of the beginning of the mbuf data. */
static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
	return mb->buf_iova + mb->data_off;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
	return rte_mbuf_data_iova(mb);
}

/** Return the default IO address of the beginning of the mbuf data, used by
 * drivers in their receive function: the location where the NIC should write
 * data, just after the headroom. */
static inline rte_iova_t
rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
{
	return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
	return rte_mbuf_data_iova_default(mb);
}

/** Return the mbuf owning the data buffer address of an indirect mbuf. */
static inline struct rte_mbuf *
rte_mbuf_from_indirect(struct rte_mbuf *mi)
{
	return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
}

/** Return the buffer address embedded in the given mbuf. */
static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
	char *buffer_addr;
	buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
	return buffer_addr;
}

/** Return the starting address of the private data area embedded in
 * the given mbuf. */
static inline void * __rte_experimental
rte_mbuf_to_priv(struct rte_mbuf *m)
{
	return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
}

/** Returns TRUE if the given mbuf is cloned by mbuf indirection. */
#define RTE_MBUF_CLONED(mb)     ((mb)->ol_flags & IND_ATTACHED_MBUF)

/** Deprecated: use RTE_MBUF_CLONED(). */
#define RTE_MBUF_INDIRECT(mb)   RTE_MBUF_CLONED(mb)

/** Returns TRUE if the given mbuf has an external buffer attached. */
#define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF)

/** Returns TRUE if the given mbuf is direct, i.e. neither cloned nor
 * attached to an external buffer. */
#define RTE_MBUF_DIRECT(mb) \
	(!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))
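
/**
 * Editor's illustrative sketch: the three mutually exclusive data-ownership
 * states of an mbuf and how to test them with the macros above.
 *
 * @code
 * if (RTE_MBUF_DIRECT(m)) {
 *	// data sits in the buffer embedded right after the mbuf
 * } else if (RTE_MBUF_HAS_EXTBUF(m)) {
 *	// data sits in a user-attached external buffer
 * } else {
 *	// RTE_MBUF_CLONED(m): data belongs to another (direct) mbuf
 * }
 * @endcode
 */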

/** Private data in case of pktmbuf pool. */
struct rte_pktmbuf_pool_private {
	uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
	uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
};

#ifdef RTE_LIBRTE_MBUF_DEBUG

/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

#else /* RTE_LIBRTE_MBUF_DEBUG */

/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

#endif /* RTE_LIBRTE_MBUF_DEBUG */

#ifdef RTE_MBUF_REFCNT_ATOMIC

/** Reads the value of an mbuf's refcnt. */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
}

/** Sets an mbuf's refcnt to a defined value. */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
}

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
}

/** Adds the given value to an mbuf's refcnt and returns its new value. */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/*
	 * The atomic_add is an expensive operation, so we don't want to
	 * call it in the case where we know we are the unique holder of
	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
	 * operation has to be used because concurrent accesses on the
	 * reference counter can occur.
	 */
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		++value;
		rte_mbuf_refcnt_set(m, (uint16_t)value);
		return (uint16_t)value;
	}

	return __rte_mbuf_refcnt_update(m, value);
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	m->refcnt = (uint16_t)(m->refcnt + value);
	return m->refcnt;
}

/** Adds the given value to an mbuf's refcnt and returns its new value. */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return __rte_mbuf_refcnt_update(m, value);
}

/** Reads the value of an mbuf's refcnt. */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return m->refcnt;
}

/** Sets an mbuf's refcnt to the defined value. */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	m->refcnt = new_value;
}

#endif /* RTE_MBUF_REFCNT_ATOMIC */

/** Reads the refcnt of an external buffer. */
static inline uint16_t
rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
{
	return (uint16_t)(rte_atomic16_read(&shinfo->refcnt_atomic));
}

/** Set the refcnt of an external buffer. */
static inline void
rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo,
	uint16_t new_value)
{
	rte_atomic16_set(&shinfo->refcnt_atomic, (int16_t)new_value);
}

/** Add the given value to the refcnt of an external buffer and return its
 * new value. */
static inline uint16_t
rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo,
	int16_t value)
{
	if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
		++value;
		rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
		return (uint16_t)value;
	}

	return (uint16_t)rte_atomic16_add_return(&shinfo->refcnt_atomic, value);
}

/** Mbuf prefetch */
#define RTE_MBUF_PREFETCH_TO_FREE(m) do {       \
	if ((m) != NULL)                        \
		rte_prefetch0(m);               \
} while (0)


/** Sanity checks on an mbuf: panic if it fails. */
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

#define MBUF_RAW_ALLOC_CHECK(m) do {				\
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);		\
	RTE_ASSERT((m)->next == NULL);				\
	RTE_ASSERT((m)->nb_segs == 1);				\
	__rte_mbuf_sanity_check(m, 0);				\
} while (0)

/** Allocate an uninitialized mbuf from mempool *mp*. The caller is expected
 * to do the initialization (e.g. rte_pktmbuf_reset()). */
static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	if (rte_mempool_get(mp, (void **)&m) < 0)
		return NULL;
	MBUF_RAW_ALLOC_CHECK(m);
	return m;
}

/** Put an mbuf back into its original mempool. The mbuf must be direct,
 * with a refcnt of 1 and no chained segments. */
static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_DIRECT(m));
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(m->next == NULL);
	RTE_ASSERT(m->nb_segs == 1);
	__rte_mbuf_sanity_check(m, 0);
	rte_mempool_put(m->pool, m);
}

/** The packet mbuf constructor, given as a callback function to
 * rte_mempool_obj_iter() or rte_mempool_create(). */
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		      void *m, unsigned i);


/** A packet mbuf pool constructor, given as a callback function to
 * rte_mempool_create(). */
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);

/** Create a mbuf pool: a wrapper around the mempool API that creates and
 * initializes a pool of packet mbufs. */
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n,
	unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id);
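
/**
 * Editor's illustrative sketch: a typical pool for RX/TX, with 8191 mbufs,
 * a 256-entry per-lcore cache, no application private area, and the default
 * buffer size. Assumes rte_socket_id() from <rte_lcore.h>; handle_error()
 * is a hypothetical helper.
 *
 * @code
 * struct rte_mempool *mp = rte_pktmbuf_pool_create("MBUF_POOL", 8191, 256,
 *	0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 * if (mp == NULL)
 *	handle_error();
 * @endcode
 */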

/** Create a mbuf pool with a given mempool ops name. */
struct rte_mempool *
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id, const char *ops_name);

/** Get the data room size of mbufs stored in a pktmbuf pool: the amount of
 * data that can be stored in a mbuf, including the headroom
 * (RTE_PKTMBUF_HEADROOM). */
static inline uint16_t
rte_pktmbuf_data_room_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_data_room_size;
}

/** Get the application private size of mbufs stored in a pktmbuf pool: a
 * zone located between the rte_mbuf structure and the data buffer where an
 * application can store data associated to a packet. */
static inline uint16_t
rte_pktmbuf_priv_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_priv_size;
}

/** Reset the data_off field of a packet mbuf to its default value.
 * The given mbuf must have only one segment, which should be empty. */
static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
	m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
					(uint16_t)m->buf_len);
}

#define MBUF_INVALID_PORT UINT16_MAX

/** Reset the fields of a packet mbuf to their default values.
 * The given mbuf must have only one segment. */
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
	m->next = NULL;
	m->pkt_len = 0;
	m->tx_offload = 0;
	m->vlan_tci = 0;
	m->vlan_tci_outer = 0;
	m->nb_segs = 1;
	m->port = MBUF_INVALID_PORT;

	m->ol_flags = 0;
	m->packet_type = 0;
	rte_pktmbuf_reset_headroom(m);

	m->data_len = 0;
	__rte_mbuf_sanity_check(m, 1);
}

/** Allocate a new mbuf from a mempool. The new mbuf contains one segment of
 * length 0, with the data pointer initialized to leave some headroom in the
 * buffer (if the buffer size allows). */
static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;
	if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
		rte_pktmbuf_reset(m);
	return m;
}
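
/**
 * Editor's illustrative sketch: allocate an mbuf and reserve room for data
 * with rte_pktmbuf_append() (defined later in this file). memset() assumes
 * <string.h>.
 *
 * @code
 * struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
 * if (m != NULL) {
 *	char *p = rte_pktmbuf_append(m, 64);
 *	if (p != NULL)
 *		memset(p, 0, 64);
 * }
 * @endcode
 */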

/** Allocate a bulk of mbufs, initialize their refcnt and reset their fields
 * to default values. */
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	 struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand Duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * Here a while() loop is used rather than a do {} while() loop to
	 * avoid an extra check if count is zero.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}

/** Initialize shared data at the end of an external buffer before attaching
 * it to a mbuf with rte_pktmbuf_attach_extbuf(). This helper spares a few
 * bytes at the end of the buffer for the shared data; the free callback and
 * its argument are saved and the refcnt is set to 1. On return, buf_len is
 * reduced by the size of the shared data placed at the tail of the buffer. */
static inline struct rte_mbuf_ext_shared_info *
rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
	rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
{
	struct rte_mbuf_ext_shared_info *shinfo;
	void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
	void *addr;

	addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
				   sizeof(uintptr_t));
	if (addr <= buf_addr)
		return NULL;

	shinfo = (struct rte_mbuf_ext_shared_info *)addr;
	shinfo->free_cb = free_cb;
	shinfo->fcb_opaque = fcb_opaque;
	rte_mbuf_ext_refcnt_set(shinfo, 1);

	*buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
	return shinfo;
}

/** Attach an external buffer to a mbuf. A user-managed anonymous buffer can
 * be attached to an mbuf; the free callback and its argument are provided
 * via shinfo and are called once all the mbufs are detached from the buffer
 * (refcnt becomes zero). */
static inline void __rte_experimental
rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
	rte_iova_t buf_iova, uint16_t buf_len,
	struct rte_mbuf_ext_shared_info *shinfo)
{
	/* mbuf should not be read-only */
	RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(shinfo->free_cb != NULL);

	m->buf_addr = buf_addr;
	m->buf_iova = buf_iova;
	m->buf_len = buf_len;

	m->data_len = 0;
	m->data_off = 0;

	m->ol_flags |= EXT_ATTACHED_MBUF;
	m->shinfo = shinfo;
}
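
/**
 * Editor's illustrative sketch: attaching an external buffer. The shared
 * info is carved from the tail of the buffer itself, shrinking buf_len, and
 * the buffer is then attached with the reduced length. ext_buf, ext_iova,
 * ext_len, my_free_cb and my_arg are hypothetical.
 *
 * @code
 * uint16_t buf_len = ext_len;
 * struct rte_mbuf_ext_shared_info *shinfo =
 *	rte_pktmbuf_ext_shinfo_init_helper(ext_buf, &buf_len,
 *					   my_free_cb, my_arg);
 * if (shinfo != NULL)
 *	rte_pktmbuf_attach_extbuf(m, ext_buf, ext_iova, buf_len, shinfo);
 * @endcode
 */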

/** Detach the external buffer attached to a mbuf, same as
 * rte_pktmbuf_detach(). */
#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)

/** Attach a packet mbuf to another packet mbuf. If *m* carries an external
 * buffer, *mi* is attached to that external buffer; otherwise *mi* is
 * indirectly attached to *m* and the direct mbuf's reference counter is
 * incremented. */
static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
	    rte_mbuf_refcnt_read(mi) == 1);

	if (RTE_MBUF_HAS_EXTBUF(m)) {
		rte_mbuf_ext_refcnt_update(m->shinfo, 1);
		mi->ol_flags = m->ol_flags;
		mi->shinfo = m->shinfo;
	} else {
		/* if m is not direct, get the mbuf that embeds the data */
		rte_mbuf_refcnt_update(rte_mbuf_from_indirect(m), 1);
		mi->priv_size = m->priv_size;
		mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
	}

	mi->buf_iova = m->buf_iova;
	mi->buf_addr = m->buf_addr;
	mi->buf_len = m->buf_len;

	mi->data_off = m->data_off;
	mi->data_len = m->data_len;
	mi->port = m->port;
	mi->vlan_tci = m->vlan_tci;
	mi->vlan_tci_outer = m->vlan_tci_outer;
	mi->tx_offload = m->tx_offload;
	mi->hash = m->hash;

	mi->next = NULL;
	mi->pkt_len = mi->data_len;
	mi->nb_segs = 1;
	mi->packet_type = m->packet_type;
	mi->timestamp = m->timestamp;

	__rte_mbuf_sanity_check(mi, 1);
	__rte_mbuf_sanity_check(m, 1);
}

/** @internal used by rte_pktmbuf_detach(). Decrement the reference counter
 * of the external buffer; when it reaches 0, the buffer is freed by the
 * pre-registered callback. */
static inline void
__rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
	RTE_ASSERT(m->shinfo != NULL);

	if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
		m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
}

/** @internal used by rte_pktmbuf_detach(). Decrement the direct mbuf's
 * reference counter; when it reaches 0, the direct mbuf is freed. */
static inline void
__rte_pktmbuf_free_direct(struct rte_mbuf *m)
{
	struct rte_mbuf *md;

	RTE_ASSERT(RTE_MBUF_INDIRECT(m));

	md = rte_mbuf_from_indirect(m);

	if (rte_mbuf_refcnt_update(md, -1) == 0) {
		md->next = NULL;
		md->nb_segs = 1;
		rte_mbuf_refcnt_set(md, 1);
		rte_mbuf_raw_free(md);
	}
}

/** Detach a packet mbuf from an external buffer or a direct buffer: restore
 * the original mbuf address and length values and reset the general fields.
 * All other fields of the given packet mbuf are left intact. */
static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	uint16_t priv_size;

	if (RTE_MBUF_HAS_EXTBUF(m))
		__rte_pktmbuf_free_extbuf(m);
	else
		__rte_pktmbuf_free_direct(m);

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;
}

/** Decrease the reference counter and unlink a mbuf segment. Does the same
 * as a free, except that it does not return the segment to its pool: it
 * decreases the reference counter and, if it reaches 0, detaches an indirect
 * mbuf from its parent. */
static __rte_always_inline struct rte_mbuf *
rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);

	if (likely(rte_mbuf_refcnt_read(m) == 1)) {

		if (!RTE_MBUF_DIRECT(m))
			rte_pktmbuf_detach(m);

		if (m->next != NULL) {
			m->next = NULL;
			m->nb_segs = 1;
		}

		return m;

	} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {

		if (!RTE_MBUF_DIRECT(m))
			rte_pktmbuf_detach(m);

		if (m->next != NULL) {
			m->next = NULL;
			m->nb_segs = 1;
		}
		rte_mbuf_refcnt_set(m, 1);

		return m;
	}
	return NULL;
}

/** Free a segment of a packet mbuf into its original mempool, without
 * parsing other segments in case of chained buffers. */
static __rte_always_inline void
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
	m = rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL))
		rte_mbuf_raw_free(m);
}

/** Free a packet mbuf back into its original mempool, including all its
 * segments in case of chained buffers. */
static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
	struct rte_mbuf *m_next;

	if (m != NULL)
		__rte_mbuf_sanity_check(m, 1);

	while (m != NULL) {
		m_next = m->next;
		rte_pktmbuf_free_seg(m);
		m = m_next;
	}
}

/** Create a "clone" of the given packet mbuf: for each segment, allocate a
 * new mbuf from the given pool and attach it to the segment, then update
 * pkt_len and nb_segs of the clone to match the original. */
static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
		struct rte_mempool *mp)
{
	struct rte_mbuf *mc, *mi, **prev;
	uint32_t pktlen;
	uint16_t nseg;

	if (unlikely((mc = rte_pktmbuf_alloc(mp)) == NULL))
		return NULL;

	mi = mc;
	prev = &mi->next;
	pktlen = md->pkt_len;
	nseg = 0;

	do {
		nseg++;
		rte_pktmbuf_attach(mi, md);
		*prev = mi;
		prev = &mi->next;
	} while ((md = md->next) != NULL &&
	    (mi = rte_pktmbuf_alloc(mp)) != NULL);

	*prev = NULL;
	mc->nb_segs = nseg;
	mc->pkt_len = pktlen;

	/* Allocation of new indirect segment failed */
	if (unlikely(mi == NULL)) {
		rte_pktmbuf_free(mc);
		return NULL;
	}

	__rte_mbuf_sanity_check(mc, 1);
	return mc;
}

/** Adds the given value to the refcnt of all packet mbuf segments. */
static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
	__rte_mbuf_sanity_check(m, 1);

	do {
		rte_mbuf_refcnt_update(m, v);
	} while ((m = m->next) != NULL);
}

/** Get the headroom in a packet mbuf. */
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return m->data_off;
}

/** Get the tailroom of a packet mbuf. */
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
			  m->data_len);
}

/** Get the last segment of the packet. */
static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	while (m->next != NULL)
		m = m->next;
	return m;
}

/** A macro that points to an offset into the data in the mbuf,
 * cast to type t. */
#define rte_pktmbuf_mtod_offset(m, t, o)	\
	((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

/** A macro that points to the start of the data in the mbuf. */
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
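
/**
 * Editor's illustrative sketch: reading packet headers through the mtod
 * macros. Assumes struct ether_hdr from <rte_ether.h> and struct ipv4_hdr
 * from <rte_ip.h> (the DPDK 18.11 names).
 *
 * @code
 * struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
 * struct ipv4_hdr *ip =
 *	rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, sizeof(*eth));
 * @endcode
 */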

/** A macro that returns the IO address of an offset into the start of
 * the data in the mbuf. */
#define rte_pktmbuf_iova_offset(m, o) \
	(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))

/* deprecated */
#define rte_pktmbuf_mtophys_offset(m, o) \
	rte_pktmbuf_iova_offset(m, o)

/** A macro that returns the IO address of the start of the data in the mbuf. */
#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)

/* deprecated */
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)

/** A macro that returns the length of the packet (sum of the data lengths
 * of all segments). */
#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

/** A macro that returns the length of the segment. */
#define rte_pktmbuf_data_len(m) ((m)->data_len)

/** Prepend len bytes to an mbuf data area and return a pointer to the new
 * data start address. If there is not enough headroom in the first segment,
 * returns NULL without modifying the mbuf. */
static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
					uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > rte_pktmbuf_headroom(m)))
		return NULL;

	/* NB: elaborating the subtraction like this instead of using
	 * -= allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	m->data_off = (uint16_t)(m->data_off - len);
	m->data_len = (uint16_t)(m->data_len + len);
	m->pkt_len  = (m->pkt_len + len);

	return (char *)m->buf_addr + m->data_off;
}

/** Append len bytes to an mbuf and return a pointer to the start address of
 * the added data. If there is not enough tailroom in the last segment,
 * returns NULL without modifying the mbuf. */
static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	void *tail;
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
		return NULL;

	tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
	m_last->data_len = (uint16_t)(m_last->data_len + len);
	m->pkt_len  = (m->pkt_len + len);
	return (char *)tail;
}

/** Remove len bytes at the beginning of an mbuf and return a pointer to the
 * new data start address. If len is greater than the length of the first
 * segment, returns NULL without modifying the mbuf. */
static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > m->data_len))
		return NULL;

	/* NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	m->data_len = (uint16_t)(m->data_len - len);
	m->data_off = (uint16_t)(m->data_off + len);
	m->pkt_len  = (m->pkt_len - len);
	return (char *)m->buf_addr + m->data_off;
}

/** Remove len bytes of data at the end of the mbuf. If len is greater than
 * the length of the last segment, returns -1 without modifying the mbuf. */
static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > m_last->data_len))
		return -1;

	m_last->data_len = (uint16_t)(m_last->data_len - len);
	m->pkt_len  = (m->pkt_len - len);
	return 0;
}
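
/**
 * Editor's illustrative sketch: the four data-window operations.
 * prepend/adj grow and shrink the front of the data (using headroom);
 * append/trim grow and shrink the back (using tailroom). All four keep
 * pkt_len consistent. Assumes struct ether_hdr from <rte_ether.h>.
 *
 * @code
 * char *hdr = rte_pktmbuf_prepend(m, sizeof(struct ether_hdr));
 * char *pad = rte_pktmbuf_append(m, 16);
 * rte_pktmbuf_adj(m, sizeof(struct ether_hdr));
 * rte_pktmbuf_trim(m, 16);
 * @endcode
 */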

/** Test if mbuf data is contiguous (i.e. with only one segment). */
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	return !!(m->nb_segs == 1);
}

/** @internal used by rte_pktmbuf_read(). */
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf);

/** Read len data bytes in a mbuf at the specified offset. If the data is
 * contiguous, return a pointer into the mbuf data; otherwise copy the data
 * into the buffer provided by the user and return a pointer to it. */
static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
	uint32_t off, uint32_t len, void *buf)
{
	if (likely(off + len <= rte_pktmbuf_data_len(m)))
		return rte_pktmbuf_mtod_offset(m, char *, off);
	else
		return __rte_pktmbuf_read(m, off, len, buf);
}
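
/**
 * Editor's illustrative sketch: rte_pktmbuf_read() copies into the caller's
 * buffer only when the requested span is not contiguous, so the returned
 * pointer (not the buffer) must be used, and NULL means the packet is too
 * short. Assumes struct ether_hdr and struct ipv4_hdr.
 *
 * @code
 * struct ipv4_hdr hdr_buf;
 * const struct ipv4_hdr *ip = rte_pktmbuf_read(m,
 *	sizeof(struct ether_hdr), sizeof(hdr_buf), &hdr_buf);
 * if (ip == NULL)
 *	return;	// packet shorter than the requested span
 * @endcode
 */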

/** Chain an mbuf to another, thereby creating a segmented packet.
 * Note: the implementation does a linear walk over the segments to find the
 * tail entry; for packets with many segments it is better to chain the
 * entries manually. */
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	/* Check for number-of-segments-overflow */
	if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	/* accumulate number of segments and total length.
	 * NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
	head->pkt_len += tail->pkt_len;

	/* pkt_len is only set in the head */
	tail->pkt_len = tail->data_len;

	return 0;
}
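
/**
 * Editor's illustrative sketch: on -EOVERFLOW both mbufs are left untouched
 * and the caller still owns them.
 *
 * @code
 * if (rte_pktmbuf_chain(head, tail) != 0)
 *	rte_pktmbuf_free(tail);	// could not chain: free it separately
 * @endcode
 */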

/** Validate general requirements for Tx offload in mbuf: checks the
 * correctness and completeness of the Tx offload settings. */
static inline int
rte_validate_tx_offload(const struct rte_mbuf *m)
{
	uint64_t ol_flags = m->ol_flags;
	uint64_t inner_l3_offset = m->l2_len;

	/* Does packet set any of available offloads? */
	if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
		return 0;

	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
		/* NB: elaborating the addition like this instead of using
		 * += gives the result uint64_t type instead of int,
		 * avoiding compiler warnings on gcc 8.1 at least */
		inner_l3_offset = inner_l3_offset + m->outer_l2_len +
				  m->outer_l3_len;

	/* Headers are fragmented */
	if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
		return -ENOTSUP;

	/* IP checksum can be counted only for IPv4 packet */
	if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
		return -EINVAL;

	/* IP type not set when required */
	if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
		if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
			return -EINVAL;

	/* Check requirements for TSO packet */
	if (ol_flags & PKT_TX_TCP_SEG)
		if ((m->tso_segsz == 0) ||
		    ((ol_flags & PKT_TX_IPV4) &&
		     !(ol_flags & PKT_TX_IP_CKSUM)))
			return -EINVAL;

	/* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
	    !(ol_flags & PKT_TX_OUTER_IPV4))
		return -EINVAL;

	return 0;
}
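
/**
 * Editor's illustrative sketch: setting up an IPv4/TCP packet for hardware
 * L3/L4 checksum offload, the kind of configuration that
 * rte_validate_tx_offload() above checks. The ip/tcp header pointers are
 * hypothetical; per the DPDK offload rules the IP checksum field is zeroed
 * and the TCP checksum is seeded with the pseudo-header checksum
 * (rte_ipv4_phdr_cksum() from <rte_ip.h>).
 *
 * @code
 * m->l2_len = sizeof(struct ether_hdr);
 * m->l3_len = sizeof(struct ipv4_hdr);
 * m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 * ip->hdr_checksum = 0;
 * tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
 * @endcode
 */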

/** Linearize data in mbuf: move the data into the first segment if there is
 * enough tailroom; the subsequent segments are unchained and freed. */
static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
	size_t seg_len, copy_len;
	struct rte_mbuf *m;
	struct rte_mbuf *m_next;
	char *buffer;

	if (rte_pktmbuf_is_contiguous(mbuf))
		return 0;

	/* Extend first segment to the total packet length */
	copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);

	if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
		return -1;

	buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
	mbuf->data_len = (uint16_t)(mbuf->pkt_len);

	/* Append data from next segments to the first one */
	m = mbuf->next;
	while (m != NULL) {
		m_next = m->next;

		seg_len = rte_pktmbuf_data_len(m);
		rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
		buffer += seg_len;

		rte_pktmbuf_free_seg(m);
		m = m_next;
	}

	mbuf->next = NULL;
	mbuf->nb_segs = 1;

	return 0;
}

/** Dump an mbuf structure to a file, with all fields of the given packet
 * mbuf and of all its associated segments (in the case of a chained buffer). */
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_H_ */