Almost every VPP deployment uses the dpdk node as its packet-receiving driver; this article analyzes its source code.
- Basic Concepts
vlib_buffer_t
Packets received by DPDK are described by the rte_mbuf structure. For compatibility with its other receive nodes (netmap, pcap, etc.), VPP instead describes packets with vlib_buffer_t.
Original diagram: https://blog.csdn.net/jqh9804/article/details/53066676
The vlib_buffer_t sits immediately after the rte_mbuf, inside the mbuf's headroom space.
vlib_buffer_from_rte_mbuf and rte_mbuf_from_vlib_buffer convert between the two:
#define rte_mbuf_from_vlib_buffer(x) (((struct rte_mbuf *)x) - 1)
#define vlib_buffer_from_rte_mbuf(x) ((vlib_buffer_t *)(x+1))
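Since the two headers are adjacent in memory, both conversions are pure pointer arithmetic. A minimal round-trip sketch (mbuf_buffer_round_trip is a hypothetical helper; mb is any mbuf returned by the PMD):
static void
mbuf_buffer_round_trip (struct rte_mbuf *mb)
{
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);    /* (vlib_buffer_t *) (mb + 1) */
  struct rte_mbuf *m2 = rte_mbuf_from_vlib_buffer (b);  /* ((struct rte_mbuf *) b) - 1 */
  ASSERT (m2 == mb);  /* the two macros are exact inverses */
}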
Note that this structure uses the CLIB_CACHE_LINE_ALIGN_MARK macro three times, dividing it into three segments and guaranteeing that each segment starts on a CLIB_CACHE_LINE_BYTES boundary. For a structure accessed this heavily, the layout lets code prefetch memory precisely, segment by segment, as needed.
#define CLIB_CACHE_LINE_ALIGN_MARK(mark) u8 mark[0] __attribute__((aligned(CLIB_CACHE_LINE_BYTES)))
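The mark expands to a zero-length array, so it occupies no space; it only forces alignment and gives each segment an addressable name. A trimmed sketch of the pattern (segmented_buffer_t and its fields are illustrative, not the real vlib_buffer_t definition):
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /* hot fields touched on every packet: current_data, current_length, flags ... */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
  /* colder fields: per-feature opaque metadata, trace handle ... */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
  /* headroom and packet data follow */
} segmented_buffer_t;

/* a dispatch loop can then prefetch exactly the segment it is about to
   touch, e.g. CLIB_PREFETCH (b->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD); */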
vlib_get_next_frame / vlib_put_next_frame
An inseparable pair that shows up in almost every node. vlib_get_next_frame obtains the memory area in which packets bound for the next node will reside; vlib_put_next_frame writes those packets into place. After that, the next node can be scheduled by the graph dispatcher and process the packets handed to it.
#define vlib_get_next_frame(vm,node,next_index,vectors,n_vectors_left) \
vlib_get_next_frame_macro (vm, node, next_index, \
vectors, n_vectors_left, \
/* alloc new frame */ 0)
#define vlib_get_new_next_frame(vm,node,next_index,vectors,n_vectors_left) \
vlib_get_next_frame_macro (vm, node, next_index, \
vectors, n_vectors_left, \
/* alloc new frame */ 1)
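The canonical usage inside a node's dispatch function looks like the sketch below (n_left_from and bi0 stand for the node's own input bookkeeping and are assumptions of this sketch). vlib_get_new_next_frame differs only in always allocating a fresh frame, which is what dpdk_device_input relies on further down:
u32 *to_next, n_left_to_next;

vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
while (n_left_from > 0 && n_left_to_next > 0)
  {
    /* ... decide that buffer index bi0 goes to next_index ... */
    to_next[0] = bi0;      /* enqueue one buffer index into the frame */
    to_next += 1;
    n_left_to_next -= 1;
    n_left_from -= 1;
  }
vlib_put_next_frame (vm, node, next_index, n_left_to_next);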
EFD (early-fast-discard)
I have not found any material on this; it is presumably another performance-optimization feature. A guess at the author's intent will be added later.
- Core Functions
dpdk_device_input
static_always_inline u32
dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
vlib_node_runtime_t * node, u32 thread_index, u16 queue_id)
{
uword n_rx_packets = 0, n_rx_bytes;
dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, queue_id);
u32 n_left, n_trace;
u32 *buffers;
u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
struct rte_mbuf **mb;
vlib_buffer_t *b0;
u16 *next;
u16 or_flags;
u32 n;
int single_next = 0;
dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
thread_index);
vlib_buffer_t *bt = &ptd->buffer_template;
if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
return 0;
/* get up to DPDK_RX_BURST_SZ buffers from PMD */
while (n_rx_packets < DPDK_RX_BURST_SZ)
{
n = rte_eth_rx_burst (xd->port_id, queue_id,
ptd->mbufs + n_rx_packets,
DPDK_RX_BURST_SZ - n_rx_packets);
n_rx_packets += n;
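/* some PMDs never return more than 32 packets per call, so only a burst
   shorter than 32 indicates the queue is actually drained */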
if (n < 32)
break;
}
if (n_rx_packets == 0)
return 0;
/* Update buffer template */
vnet_buffer (bt)->sw_if_index[VLIB_RX] = xd->sw_if_index;
bt->error = node->errors[DPDK_ERROR_NONE];
/* as DPDK allocates empty buffers for each queue from the mempool provided
   before interface start, it is safe to store this in the template */
bt->buffer_pool_index = rxq->buffer_pool_index;
bt->ref_count = 1;
vnet_buffer (bt)->feature_arc_index = 0;
bt->current_config_index = 0;
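/* dpdk_process_rx_burst () below copies this template into every received
   buffer, so per-buffer initialization is a single copy */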
/* honor a per-interface override of the next node, if one is set */
if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
next_index = xd->per_interface_next_index;
/* as all packets belong to the same interface, the feature arc lookup
   can be done once and the result stored in the buffer template */
if (PREDICT_FALSE (vnet_device_input_have_features (xd->sw_if_index)))
vnet_feature_start_device_input_x1 (xd->sw_if_index, &next_index, bt);
if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 1, &or_flags);
else
n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 0, &or_flags);
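/* or_flags is the bitwise OR of ol_flags across the whole burst; rare
   conditions (FDIR mark, bad ip4 checksum) can thus be tested once per
   burst instead of once per packet */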
if (PREDICT_FALSE (or_flags & PKT_RX_FDIR))
{
/* some packets will need to go to different next nodes */
for (n = 0; n < n_rx_packets; n++)
ptd->next[n] = next_index;
/* flow offload - process if rx flow offload enabled and at least one
packet is marked */
if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) &&
(or_flags & PKT_RX_FDIR)))
dpdk_process_flow_offload (xd, ptd, n_rx_packets);
/* enqueue buffers to the next node */
vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs,
ptd->buffers, n_rx_packets,
sizeof (struct rte_mbuf));
vlib_buffer_enqueue_to_next (vm, node, ptd->buffers, ptd->next,
n_rx_packets);
}
else
{
u32 *to_next, n_left_to_next;
vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs, to_next,
n_rx_packets,
sizeof (struct rte_mbuf));
if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
{
vlib_next_frame_t *nf;
vlib_frame_t *f;
ethernet_input_frame_t *ef;
nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
f = vlib_get_frame (vm, nf->frame);
f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
ef = vlib_frame_scalar_args (f);
ef->sw_if_index = xd->sw_if_index;
ef->hw_if_index = xd->hw_if_index;
/* if the PMD supports ip4 checksum check and there are no packets
   marked as ip4 checksum bad we can notify ethernet input so it
   can send packets to the ip4-input-no-checksum node */
if (xd->flags & DPDK_DEVICE_FLAG_RX_IP4_CKSUM &&
(or_flags & PKT_RX_IP_CKSUM_BAD) == 0)
f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
vlib_frame_no_append (f);
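/* the call above marks the frame so no further buffers may be appended:
   its scalar arguments are only valid for this single interface */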
}
n_left_to_next -= n_rx_packets;
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
single_next = 1;
}
/* packet trace if enabled */
if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
{
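/* in the single-next case the buffer indices were written straight into
   the frame, so recompute them into ptd->buffers for the trace loop */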
if (single_next)
vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs,
ptd->buffers, n_rx_packets,
sizeof (struct rte_mbuf));
n_left = n_rx_packets;
buffers = ptd->buffers;
mb = ptd->mbufs;
next = ptd->next;
while (n_trace && n_left)
{
b0 = vlib_get_buffer (vm, buffers[0]);
if (single_next == 0)
next_index = next[0];
vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 0);
dpdk_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof t0[0]);
t0->queue_index = queue_id;
t0->device_index = xd->device_index;
t0->buffer_index = vlib_get_buffer_index (vm, b0);
clib_memcpy_fast (&t0->mb, mb[0], sizeof t0->mb);
clib_memcpy_fast (&t0->buffer, b0,
sizeof b0[0] - sizeof b0->pre_data);
clib_memcpy_fast (t0->buffer.pre_data, b0->data,
sizeof t0->buffer.pre_data);
clib_memcpy_fast (&t0->data, mb[0]->buf_addr + mb[0]->data_off,
sizeof t0->data);
n_trace--;
n_left--;
buffers++;
mb++;
next++;
}
vlib_set_trace_count (vm, node, n_trace);
}
vlib_increment_combined_counter
(vnet_get_main ()->interface_main.combined_sw_if_counters
+ VNET_INTERFACE_COUNTER_RX, thread_index, xd->sw_if_index,
n_rx_packets, n_rx_bytes);
vnet_device_increment_rx_packets (thread_index, n_rx_packets);
return n_rx_packets;
}
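dpdk_process_rx_burst itself is not shown above. Conceptually, for every mbuf in the burst it locates the vlib_buffer_t stored in the headroom, copies in the buffer template prepared earlier, and translates rte_mbuf metadata into vlib_buffer_t fields while accumulating the byte count. A simplified single-segment sketch of that per-packet work (process_one is a hypothetical name; the real function also handles multi-segment chains and software prefetch):
static inline u32
process_one (vlib_buffer_t * bt, struct rte_mbuf *mb)
{
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  vlib_buffer_copy_template (b, bt);  /* template built in dpdk_device_input */
  b->current_data = mb->data_off - RTE_PKTMBUF_HEADROOM;
  b->current_length = mb->data_len;
  return mb->data_len;                /* summed into n_rx_bytes */
}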
VLIB_NODE_FN (dpdk_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * f)
{
dpdk_main_t *dm = &dpdk_main;
dpdk_device_t *xd;
uword n_rx_packets = 0;
vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
vnet_device_and_queue_t *dq;
u32 thread_index = node->thread_index;
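/* rt->devices_and_queues holds the (device, queue) pairs assigned to
   this worker thread for polling */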
/*
* Poll all devices on this cpu for input/interrupts.
*/
/* *INDENT-OFF* */
foreach_device_and_queue (dq, rt->devices_and_queues)
{
xd = vec_elt_at_index(dm->devices, dq->dev_instance);
n_rx_packets += dpdk_device_input (vm, dm, xd, node, thread_index,
dq->queue_id);
}
/* *INDENT-ON* */
return n_rx_packets;
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (dpdk_input_node) = {
.type = VLIB_NODE_TYPE_INPUT,
.name = "dpdk-input",
.sibling_of = "device-input",
.flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
/* Will be enabled if/when hardware is detected. */
.state = VLIB_NODE_STATE_DISABLED,
.format_buffer = format_ethernet_header_with_length,
.format_trace = format_dpdk_rx_trace,
.n_errors = DPDK_N_ERROR,
.error_strings = dpdk_error_strings,
};
/* *INDENT-ON* */
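Two registration details are worth noting. .sibling_of = "device-input" makes dpdk-input share the set of next nodes of the generic device-input node, so arcs and features wired after device-input automatically apply to dpdk-input as well. And, as the inline comment says, the node is registered in the DISABLED state; the DPDK plugin switches it to polling only once hardware is actually detected.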