diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index bee73353b56a8b..a66c2e65303516 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -16,6 +16,9 @@ struct idpf_vport_max_q; #include #include #include +#include +#include +#include #include #include @@ -376,6 +379,13 @@ struct idpf_vport { struct idpf_queue **txqs; bool crc_enable; + int num_xdp_txq; + int num_xdp_rxq; + int num_xdp_complq; + int xdp_txq_offset; + int xdp_rxq_offset; + int xdp_complq_offset; + u16 num_rxq; u16 num_bufq; u32 rxq_desc_count; @@ -423,6 +433,7 @@ struct idpf_vport { * @__IDPF_USER_FLAGS_NBITS: Must be last */ enum idpf_user_flags { + __IDPF_PRIV_FLAGS_HDR_SPLIT = 0, __IDPF_PROMISC_UC = 32, __IDPF_PROMISC_MC, @@ -466,6 +477,8 @@ struct idpf_vport_user_config_data { u16 num_req_rx_qs; u32 num_req_txq_desc; u32 num_req_rxq_desc; + /* Duplicated in queue structure for performance reasons */ + struct bpf_prog *xdp_prog; DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS); struct list_head mac_filter_list; }; @@ -684,6 +697,18 @@ static inline int idpf_is_queue_model_split(u16 q_model) return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT; } +/** + * idpf_xdp_is_prog_ena - check if there is an XDP program on adapter + * @vport: vport to check + */ +static inline bool idpf_xdp_is_prog_ena(struct idpf_vport *vport) +{ + if (!vport->adapter) + return false; + + return !!vport->adapter->vport_config[vport->idx]->user_config.xdp_prog; +} + #define idpf_is_cap_ena(adapter, field, flag) \ idpf_is_capability_ena(adapter, false, field, flag) #define idpf_is_cap_ena_all(adapter, field, flag) \ @@ -939,6 +964,7 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op, int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, u16 msg_size, u8 *msg); void idpf_set_ethtool_ops(struct net_device *netdev); +void idpf_vport_set_hsplit(struct idpf_vport *vport, bool ena); int idpf_vport_alloc_max_qs(struct idpf_adapter 
*adapter, struct idpf_vport_max_q *max_q); void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter, diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 52ea38669f85b3..01f58ab9932703 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -900,6 +900,8 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, if (!txq) idpf_add_empty_queue_stats(&data, qtype); + else if (test_bit(__IDPF_Q_XDP, txq->flags)) + continue; else idpf_add_queue_stats(&data, txq); } diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 19809b0ddcd909..a65bbe7c295178 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -839,6 +839,14 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) netdev->features |= dflt_features; netdev->hw_features |= dflt_features | offloads; netdev->hw_enc_features |= dflt_features | offloads; + /* TODO: XDP_TX action has to be implemented before + * enabling 'NETDEV_XDP_ACT_BASIC'. + * Move the line below to the commit where XDP_TX + * action will be implemented. + * Now, it has just been added to support testing + * of 2 simplest XDP actions: XDP_PASS and XDP_DROP. 
+ */ + netdev->xdp_features = NETDEV_XDP_ACT_BASIC; idpf_set_ethtool_ops(netdev); SET_NETDEV_DEV(netdev, &adapter->pdev->dev); @@ -1057,6 +1065,27 @@ static void idpf_vport_dealloc(struct idpf_vport *vport) adapter->next_vport = idpf_get_free_slot(adapter); } +/** + * idpf_vport_set_hsplit - enable or disable header split on a given vport + * @vport: virtual port + * @ena: flag controlling header split, On (true) or Off (false) + */ +void idpf_vport_set_hsplit(struct idpf_vport *vport, bool ena) +{ + struct idpf_vport_user_config_data *config_data; + + config_data = &vport->adapter->vport_config[vport->idx]->user_config; + if (!ena) { + clear_bit(__IDPF_PRIV_FLAGS_HDR_SPLIT, config_data->user_flags); + return; + } + + if (idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS, + IDPF_CAP_HSPLIT) && + idpf_is_queue_model_split(vport->rxq_model)) + set_bit(__IDPF_PRIV_FLAGS_HDR_SPLIT, config_data->user_flags); +} + /** * idpf_vport_alloc - Allocates the next available struct vport in the adapter * @adapter: board private structure @@ -1234,13 +1263,18 @@ static void idpf_restore_features(struct idpf_vport *vport) */ static int idpf_set_real_num_queues(struct idpf_vport *vport) { - int err; + int num_txq, err; err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq); if (err) return err; - return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq); + if (idpf_xdp_is_prog_ena(vport)) + num_txq = vport->num_txq - vport->num_xdp_txq; + else + num_txq = vport->num_txq; + + return netif_set_real_num_tx_queues(vport->netdev, num_txq); } /** @@ -1353,6 +1387,15 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res) idpf_rx_init_buf_tail(vport); + if (idpf_xdp_is_prog_ena(vport)) { + err = idpf_xdp_rxq_info_init_all(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to initialize XDP info for vport %u, %d\n", + vport->vport_id, err); + goto intr_deinit; + } + } + err = idpf_send_config_queues_msg(vport); if (err) { 
dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n", @@ -2192,10 +2235,18 @@ static int idpf_change_mtu(struct net_device *netdev, int new_mtu) idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); + if (idpf_xdp_is_prog_ena(vport) && new_mtu > IDPF_XDP_MAX_MTU) { + netdev_err(netdev, "New MTU value is not valid. The maximum MTU value is %d.\n", + IDPF_XDP_MAX_MTU); + err = -EINVAL; + goto unlock_exit; + } + netdev->mtu = new_mtu; err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE); +unlock_exit: idpf_vport_ctrl_unlock(netdev); return err; @@ -2262,6 +2313,190 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } +/** + * idpf_copy_xdp_prog_to_qs - set pointers to xdp program for each Rx queue + * @vport: vport to setup XDP for + * @xdp_prog: XDP program that should be copied to all Rx queues + */ +static void +idpf_copy_xdp_prog_to_qs(struct idpf_vport *vport, struct bpf_prog *xdp_prog) +{ + int i; + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + struct idpf_queue *q; + u16 j, num_rxq; + + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + if (idpf_is_queue_model_split(vport->rxq_model)) + q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + q = rx_qgrp->singleq.rxqs[j]; + WRITE_ONCE(q->xdp_prog, xdp_prog); + } + + if (!idpf_is_queue_model_split(vport->rxq_model)) + continue; + + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + WRITE_ONCE(q->xdp_prog, xdp_prog); + } + } +} + +static int idpf_xdp_reconfig_queues(struct idpf_vport *vport) +{ + int err; + + err = idpf_vport_adjust_qs(vport); + if (err) { + netdev_err(vport->netdev, + "Could not adjust queue number for XDP\n"); + return err; + } + idpf_vport_calc_num_q_desc(vport); + 
+ err = idpf_vport_queues_alloc(vport); + if (err) { + netdev_err(vport->netdev, + "Could not allocate queues for XDP\n"); + return err; + } + + err = idpf_send_add_queues_msg(vport, vport->num_txq, + vport->num_complq, + vport->num_rxq, vport->num_bufq); + if (err) { + netdev_err(vport->netdev, + "Could not add queues for XDP, VC message sent failed\n"); + return err; + } + + idpf_vport_alloc_vec_indexes(vport); + + return 0; +} + +/** + * idpf_assign_bpf_prog - Assign a given BPF program to vport + * @current_prog: pointer to XDP program in user config data + * @prog: BPF program to be assigned to vport + */ +static void idpf_assign_bpf_prog(struct bpf_prog **current_prog, + struct bpf_prog *prog) +{ + struct bpf_prog *old_prog; + + old_prog = xchg(current_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); +} + +/** + * idpf_xdp_setup_prog - Add or remove XDP eBPF program + * @vport: vport to setup XDP for + * @prog: XDP program + * @extack: netlink extended ack + */ +static int +idpf_xdp_setup_prog(struct idpf_vport *vport, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + int frame_size = vport->netdev->mtu; + bool needs_reconfig, vport_is_up; + struct bpf_prog **current_prog; + u16 idx = vport->idx; + int err; + + if (frame_size > IDPF_XDP_MAX_MTU || + frame_size > vport->bufq_size[0]) { + NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); + return -EOPNOTSUPP; + } + + vport_is_up = np->state == __IDPF_VPORT_UP; + + current_prog = &vport->adapter->vport_config[idx]->user_config.xdp_prog; + needs_reconfig = !!(*current_prog) != !!prog; + + if (!needs_reconfig) { + idpf_copy_xdp_prog_to_qs(vport, prog); + idpf_assign_bpf_prog(current_prog, prog); + + return 0; + } + + if (!vport_is_up) { + idpf_send_delete_queues_msg(vport); + } else { + set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags); + idpf_vport_stop(vport); + } + + idpf_deinit_rss(vport); + + if (!*current_prog && prog) { + 
netdev_warn(vport->netdev, + "Setting up XDP disables header split\n"); + idpf_vport_set_hsplit(vport, false); + } else { + idpf_vport_set_hsplit(vport, true); + } + + idpf_assign_bpf_prog(current_prog, prog); + + err = idpf_xdp_reconfig_queues(vport); + if (err) { + netdev_err(vport->netdev, + "Could not reconfigure the queues after XDP setup\n"); + return err; + } + + if (vport_is_up) { + err = idpf_vport_open(vport, false); + if (err) { + netdev_err(vport->netdev, + "Could not re-open the vport after XDP setup\n"); + return err; + } + } + + return 0; +} + +/** + * idpf_xdp - implements XDP handler + * @netdev: netdevice + * @xdp: XDP command + */ +static int idpf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) +{ + struct idpf_vport *vport; + int err; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + err = idpf_xdp_setup_prog(vport, xdp->prog, xdp->extack); + break; + default: + err = -EINVAL; + } + + idpf_vport_ctrl_unlock(netdev); + return err; +} + /** * idpf_set_mac - NDO callback to set port mac address * @netdev: network interface device structure @@ -2362,6 +2597,7 @@ static const struct net_device_ops idpf_netdev_ops_splitq = { .ndo_get_stats64 = idpf_get_stats64, .ndo_set_features = idpf_set_features, .ndo_tx_timeout = idpf_tx_timeout, + .ndo_bpf = idpf_xdp, }; static const struct net_device_ops idpf_netdev_ops_singleq = { @@ -2376,4 +2612,5 @@ static const struct net_device_ops idpf_netdev_ops_singleq = { .ndo_get_stats64 = idpf_get_stats64, .ndo_set_features = idpf_set_features, .ndo_tx_timeout = idpf_tx_timeout, + .ndo_bpf = idpf_xdp, }; diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 1646ff3877baa8..692d8b0b6ffa70 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -21,6 +21,17 @@ static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack, return 0; } 
+/** + * idpf_is_xdp_enabled - Check if XDP is enabled on the rx or buffer queue + * @rxbufq: rx or buffer queue + * + * Returns true, if the queue has been configured for XDP. + */ +static bool idpf_is_xdp_enabled(const struct idpf_queue *rxbufq) +{ + return !!rcu_access_pointer(rxbufq->xdp_prog); +} + /** * idpf_buf_lifo_pop - pop a buffer pointer from stack * @stack: pointer to stack struct @@ -582,8 +593,10 @@ static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set) * * Returns &page_pool on success, casted -errno on failure */ -static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq) +static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq, + bool xdp) { + u32 hr = xdp ? XDP_PACKET_HEADROOM : 0; struct page_pool_params pp = { .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, .order = 0, @@ -591,8 +604,8 @@ static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq) .nid = NUMA_NO_NODE, .dev = rxbufq->vport->netdev->dev.parent, .max_len = PAGE_SIZE, - .dma_dir = DMA_FROM_DEVICE, - .offset = 0, + .dma_dir = xdp ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, + .offset = hr, }; if (rxbufq->rx_buf_size == IDPF_RX_BUF_2048) @@ -655,7 +668,8 @@ static int idpf_rx_bufs_init(struct idpf_queue *rxbufq) { struct page_pool *pool; - pool = idpf_rx_create_page_pool(rxbufq); + pool = idpf_rx_create_page_pool(rxbufq, + idpf_is_xdp_enabled(rxbufq)); if (IS_ERR(pool)) return PTR_ERR(pool); @@ -705,6 +719,66 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport) return 0; } +/** + * idpf_xdp_rxbufq_init - Prepare and configure XDP structures on Rx queue + * @q: rx queue where XDP should be initialized + * + * Assign the vport's configured XDP program to the given Rx queue. + */ +static void idpf_xdp_rxbufq_init(struct idpf_queue *q) +{ + struct idpf_vport_user_config_data *config_data; + struct idpf_adapter *adapter; + int idx = q->vport->idx; + + adapter = q->vport->adapter; + config_data = &adapter->vport_config[idx]->user_config; + + WRITE_ONCE(q->xdp_prog, config_data->xdp_prog); +} + +static int idpf_xdp_rxq_info_init(struct idpf_queue *rxq) +{ + int err; + + if (!xdp_rxq_info_is_reg(&rxq->xdp_rxq)) + xdp_rxq_info_reg(&rxq->xdp_rxq, rxq->vport->netdev, + rxq->idx, rxq->q_vector->napi.napi_id); + + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + + return err; + } + +int idpf_xdp_rxq_info_init_all(struct idpf_vport *vport) +{ + struct idpf_rxq_group *rx_qgrp; + struct idpf_queue *q; + int i, j, err; + u16 num_rxq; + + for (i = 0; i < vport->num_rxq_grp; i++) { + rx_qgrp = &vport->rxq_grps[i]; + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + if (idpf_is_queue_model_split(vport->rxq_model)) + q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + q = rx_qgrp->singleq.rxqs[j]; + err = idpf_xdp_rxq_info_init(q); + if (err) + return err; + } + } + + return 0; +} + /** * idpf_rx_desc_alloc - Allocate queue Rx resources * @rxq: Rx queue for which the resources 
are setup @@ -739,6 +813,9 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model) rxq->next_to_use = 0; set_bit(__IDPF_Q_GEN_CHK, rxq->flags); + if (idpf_xdp_is_prog_ena(rxq->vport)) + idpf_xdp_rxbufq_init(rxq); + return 0; } @@ -966,6 +1043,23 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport, if (idpf_is_queue_model_split(vport->rxq_model)) vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq); + vport->num_xdp_rxq = 0; + vport->xdp_rxq_offset = 0; + if (!idpf_xdp_is_prog_ena(vport)) { + vport->num_xdp_txq = 0; + vport->xdp_txq_offset = 0; + goto adjust_bufqs; + } + /* Do not create dummy Rx queues by default */ + vport->num_xdp_txq = le16_to_cpu(vport_msg->num_rx_q); + vport->xdp_txq_offset = le16_to_cpu(vport_msg->num_tx_q) - + le16_to_cpu(vport_msg->num_rx_q); + + if (idpf_is_queue_model_split(vport->txq_model)) { + vport->num_xdp_complq = vport->num_xdp_txq; + vport->xdp_complq_offset = vport->xdp_txq_offset; + } +adjust_bufqs: /* Adjust number of buffer queues per Rx queue group. */ if (!idpf_is_queue_model_split(vport->rxq_model)) { vport->num_bufqs_per_qgrp = 0; @@ -974,12 +1068,20 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport, return; } - vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; - /* Bufq[0] default buffer size is 4K - * Bufq[1] default buffer size is 2K - */ - vport->bufq_size[0] = IDPF_RX_BUF_4096; - vport->bufq_size[1] = IDPF_RX_BUF_2048; + if (idpf_xdp_is_prog_ena(vport)) { + /* After loading the XDP program we will have only one buffer + * queue per group with buffer size 4kB. 
+ */ + vport->num_bufqs_per_qgrp = IDPF_SINGLE_BUFQ_PER_RXQ_GRP; + vport->bufq_size[0] = IDPF_RX_BUF_4096; + } else { + vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; + /* Bufq[0] default buffer size is 4K + * Bufq[1] default buffer size is 2K + */ + vport->bufq_size[0] = IDPF_RX_BUF_4096; + vport->bufq_size[1] = IDPF_RX_BUF_2048; + } } /** @@ -1092,6 +1194,22 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx, vport_msg->num_rx_q = cpu_to_le16(num_qs); vport_msg->num_rx_bufq = 0; } + if (!vport_config || !vport_config->user_config.xdp_prog) + return 0; + + /* As we now know new number of Rx and Tx queues, we can request + * additional Tx queues for XDP. For each Rx queue request additional + * Tx queue for XDP use. + */ + vport_msg->num_tx_q = + cpu_to_le16(le16_to_cpu(vport_msg->num_tx_q) + + le16_to_cpu(vport_msg->num_rx_q)); + if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) + vport_msg->num_tx_complq = vport_msg->num_tx_q; + + /* For XDP request only one bufq per Rx queue group */ + if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) + vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps); return 0; } @@ -1251,8 +1369,11 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) for (i = 0; i < vport->num_rxq_grp; i++) { struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + struct idpf_vport_user_config_data *config_data; + u16 idx = vport->idx; int j; + config_data = &adapter->vport_config[idx]->user_config; rx_qgrp->vport = vport; if (!idpf_is_queue_model_split(vport->rxq_model)) { rx_qgrp->singleq.num_rxq = num_rxq; @@ -1301,9 +1422,9 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) q->rx_buf_size = vport->bufq_size[j]; q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; q->rx_buf_stride = IDPF_RX_BUF_STRIDE; - if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, - IDPF_CAP_HSPLIT) && - idpf_is_queue_model_split(vport->rxq_model)) { + + if 
(test_bit(__IDPF_PRIV_FLAGS_HDR_SPLIT, + config_data->user_flags)) { q->rx_hsplit_en = true; q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; } @@ -1347,9 +1468,8 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) rx_qgrp->splitq.rxq_sets[j]->refillq1 = &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; - if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, - IDPF_CAP_HSPLIT) && - idpf_is_queue_model_split(vport->rxq_model)) { + if (test_bit(__IDPF_PRIV_FLAGS_HDR_SPLIT, + config_data->user_flags)) { q->rx_hsplit_en = true; q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; } @@ -1435,6 +1555,13 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport) if (err) goto err_out; + if (idpf_xdp_is_prog_ena(vport)) { + int j; + + for (j = vport->xdp_txq_offset; j < vport->num_txq; j++) + __set_bit(__IDPF_Q_XDP, vport->txqs[j]->flags); + } + return 0; err_out: @@ -3004,7 +3131,8 @@ struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, struct sk_buff *skb; void *va; - va = page_address(rx_buf->page) + rx_buf->page_offset; + va = page_address(rx_buf->page) + rx_buf->page_offset + + rx_buf->page->pp->p.offset; /* prefetch first cache line of first page */ net_prefetch(va); @@ -3108,6 +3236,37 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de IDPF_RXD_EOF_SPLITQ)); } +static unsigned int idpf_run_xdp(struct idpf_queue *rx_bufq, + struct idpf_rx_buf *rx_buf, + struct xdp_buff *xdp, + struct bpf_prog *xdp_prog, + unsigned int size) +{ + u32 hr = rx_bufq->pp->p.offset; + unsigned int xdp_act; + + xdp_prepare_buff(xdp, page_address(rx_buf->page), hr, size, true); + + xdp_act = bpf_prog_run_xdp(xdp_prog, xdp); + rx_buf->truesize = max_t(u32, xdp->data_end - xdp->data_hard_start - hr, + rx_buf->truesize); + switch (xdp_act) { + case XDP_PASS: + case XDP_DROP: + break; + default: + bpf_warn_invalid_xdp_action(rx_bufq->vport->netdev, xdp_prog, + xdp_act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(rx_bufq->vport->netdev, xdp_prog, xdp_act); + 
+ return XDP_DROP; + } + + return xdp_act; +} + /** * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue * @rxq: Rx descriptor queue to retrieve receive buffer queue @@ -3126,6 +3285,11 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) struct idpf_queue *rx_bufq = NULL; struct sk_buff *skb = rxq->skb; u16 ntc = rxq->next_to_clean; + struct bpf_prog *xdp_prog; + struct xdp_buff xdp; + + xdp_prog = rcu_dereference(rxq->xdp_prog); + xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq); /* Process Rx packets bounded by budget */ while (likely(total_rx_pkts < budget)) { @@ -3133,11 +3297,12 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) struct idpf_sw_queue *refillq = NULL; struct idpf_rxq_set *rxq_set = NULL; struct idpf_rx_buf *rx_buf = NULL; + unsigned int xdp_act = XDP_PASS; union virtchnl2_rx_desc *desc; unsigned int pkt_len = 0; unsigned int hdr_len = 0; u16 gen_id, buf_id = 0; - /* Header buffer overflow only valid for header split */ + /* Header buffer overflow only valid for header split */ bool hbo = false; int bufq_id; u8 rxdid; @@ -3219,17 +3384,31 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) u64_stats_update_end(&rxq->stats_sync); } - if (pkt_len) { - idpf_rx_sync_for_cpu(rx_buf, pkt_len); - if (skb) - idpf_rx_add_frag(rx_buf, skb, pkt_len); - else - skb = idpf_rx_construct_skb(rxq, rx_buf, - pkt_len); - } else { + if (!pkt_len) { + idpf_rx_put_page(rx_buf); + goto pkt_len_zero; + } + + idpf_rx_sync_for_cpu(rx_buf, pkt_len); + if (xdp_prog) + xdp_act = idpf_run_xdp(rx_bufq, rx_buf, &xdp, xdp_prog, + pkt_len); + if (xdp_act != XDP_PASS) { idpf_rx_put_page(rx_buf); + + total_rx_bytes += pkt_len; + total_rx_pkts++; + idpf_rx_post_buf_refill(refillq, buf_id); + IDPF_RX_BUMP_NTC(rxq, ntc); + continue; } + if (skb) + idpf_rx_add_frag(rx_buf, skb, pkt_len); + else + skb = idpf_rx_construct_skb(rxq, rx_buf, + pkt_len); +pkt_len_zero: /* exit if we failed to retrieve a buffer */ if (!skb) 
break; @@ -3960,13 +4139,24 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) */ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) { + bool is_xdp_prog_ena = idpf_xdp_is_prog_ena(vport); u16 num_txq_grp = vport->num_txq_grp; int i, j, qv_idx, bufq_vidx = 0; struct idpf_rxq_group *rx_qgrp; struct idpf_txq_group *tx_qgrp; struct idpf_queue *q, *bufq; + int num_active_rxq; u16 q_index; + if (is_xdp_prog_ena) + /* XDP Tx queues are handled within Rx loop, + * correct num_txq_grp so that it stores number of + * regular Tx queue groups. This way when we later assign Tx to + * qvector, we go only through regular Tx queues. + */ + if (idpf_is_queue_model_split(vport->txq_model)) + num_txq_grp = vport->xdp_txq_offset; + for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) { u16 num_rxq; @@ -3976,6 +4166,8 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) else num_rxq = rx_qgrp->singleq.num_rxq; + num_active_rxq = num_rxq - vport->num_xdp_rxq; + for (j = 0; j < num_rxq; j++) { if (qv_idx >= vport->num_q_vectors) qv_idx = 0; @@ -3988,6 +4180,30 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) q_index = q->q_vector->num_rxq; q->q_vector->rx[q_index] = q; q->q_vector->num_rxq++; + + /* Do not setup XDP Tx queues for dummy Rx queues. 
*/ + if (j >= num_active_rxq) + goto skip_xdp_txq_config; + + if (is_xdp_prog_ena) { + if (idpf_is_queue_model_split(vport->txq_model)) { + tx_qgrp = &vport->txq_grps[i + vport->xdp_txq_offset]; + q = tx_qgrp->complq; + q->q_vector = &vport->q_vectors[qv_idx]; + q_index = q->q_vector->num_txq; + q->q_vector->tx[q_index] = q; + q->q_vector->num_txq++; + } else { + tx_qgrp = &vport->txq_grps[i]; + q = tx_qgrp->txqs[j + vport->xdp_txq_offset]; + q->q_vector = &vport->q_vectors[qv_idx]; + q_index = q->q_vector->num_txq; + q->q_vector->tx[q_index] = q; + q->q_vector->num_txq++; + } + } + +skip_xdp_txq_config: qv_idx++; } @@ -4021,6 +4237,9 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) q->q_vector->num_txq++; qv_idx++; } else { + num_txq = is_xdp_prog_ena ? tx_qgrp->num_txq - vport->xdp_txq_offset + : tx_qgrp->num_txq; + for (j = 0; j < num_txq; j++) { if (qv_idx >= vport->num_q_vectors) qv_idx = 0; @@ -4122,6 +4341,13 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) DIV_ROUND_UP(vport->num_rxq_grp, vport->num_q_vectors); + /* For XDP we assign both Tx and XDP Tx queues + * to the same q_vector. + * Reserve doubled number of Tx queues per vector. + */ + if (idpf_xdp_is_prog_ena(vport)) + txqs_per_vector *= 2; + for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { q_vector = &vport->q_vectors[v_idx]; q_vector->vport = vport; @@ -4242,6 +4468,15 @@ static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport) rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + /* When we use this code for legacy devices (e.g. in AVF driver), some + * Rx queues may not be used because we would not be able to create XDP + * Tx queues for them. In such a case do not add their queue IDs to the + * RSS LUT by setting the number of active Rx queues to XDP Tx queues + * count. 
+ */ + if (idpf_xdp_is_prog_ena(vport)) + num_active_rxq -= vport->num_xdp_rxq; + for (i = 0; i < rss_data->rss_lut_size; i++) { rss_data->rss_lut[i] = i % num_active_rxq; rss_data->cached_lut[i] = rss_data->rss_lut[i]; diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index df76493faa7569..5013053c90648f 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -164,6 +164,8 @@ do { \ #define IDPF_TX_FLAGS_IPV6 BIT(2) #define IDPF_TX_FLAGS_TUNNEL BIT(3) +#define IDPF_XDP_MAX_MTU 3046 + union idpf_tx_flex_desc { struct idpf_flex_tx_desc q; /* queue based scheduling */ struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */ @@ -463,6 +465,7 @@ enum idpf_queue_flags_t { __IDPF_Q_FLOW_SCH_EN, __IDPF_Q_SW_MARKER, __IDPF_Q_POLL_MODE, + __IDPF_Q_XDP, __IDPF_Q_FLAGS_NBITS, }; @@ -733,6 +736,9 @@ struct idpf_queue { dma_addr_t dma; void *desc_ring; + struct bpf_prog *xdp_prog; + struct xdp_rxq_info xdp_rxq; + u16 tx_max_bufs; u8 tx_min_pkt_len; @@ -993,6 +999,7 @@ int idpf_config_rss(struct idpf_vport *vport); int idpf_init_rss(struct idpf_vport *vport); void idpf_deinit_rss(struct idpf_vport *vport); int idpf_rx_bufs_init_all(struct idpf_vport *vport); +int idpf_xdp_rxq_info_init_all(struct idpf_vport *vport); void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, unsigned int size); struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index e276b5360c2ed8..c9a1076aa58a3f 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -3228,6 +3228,17 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport) vec_info.default_vport = vport->default_vport; vec_info.index = vport->idx; + /* Additional XDP Tx queues share the q_vector with regular Tx and Rx + * queues to 
which they are assigned. Also, XDP shall request additional + * Tx queues via VIRTCHNL. Therefore, to avoid exceeding over + * "vport->q_vector_idxs array", do not request empty q_vectors + * for XDP Tx queues. + */ + if (idpf_xdp_is_prog_ena(vport)) + vec_info.num_req_vecs = max_t(u16, + vport->num_txq - vport->num_xdp_txq, + vport->num_rxq); + num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter, vport->q_vector_idxs, &vec_info); @@ -3284,6 +3295,11 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); + if (idpf_xdp_is_prog_ena(vport)) + idpf_vport_set_hsplit(vport, false); + else + idpf_vport_set_hsplit(vport, true); + idpf_vport_init_num_qs(vport, vport_msg); idpf_vport_calc_num_q_desc(vport); idpf_vport_calc_num_q_groups(vport);