@@ -568,6 +568,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
568568 .truesize = rx_ring -> truesize ,
569569 .count = rx_ring -> count ,
570570 };
571+ const struct libeth_fq_fp hdr_fq = {
572+ .pp = rx_ring -> hdr_pp ,
573+ .fqes = rx_ring -> hdr_fqes ,
574+ .truesize = rx_ring -> hdr_truesize ,
575+ .count = rx_ring -> count ,
576+ };
571577 u16 ntu = rx_ring -> next_to_use ;
572578
573579 /* nothing to do or no valid netdev defined */
@@ -584,6 +590,14 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
584590
585591 rx_desc -> read .pkt_addr = cpu_to_le64 (addr );
586592
593+ if (!hdr_fq .pp )
594+ goto next ;
595+
596+ addr = libeth_rx_alloc (& hdr_fq , ntu );
597+ if (addr == DMA_MAPPING_ERROR )
598+ return ;
599+
600+ next :
587601 rx_desc ++ ;
588602 ntu ++ ;
589603 if (unlikely (ntu == fq .count )) {
@@ -781,6 +795,32 @@ static int ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
781795 return result ;
782796}
783797
/**
 * ixgbevf_rx_hsplit_wa - software header-split workaround
 * @hdr: destination header buffer (its own fqe, currently empty)
 * @buf: payload buffer the HW wrote the whole frame into
 * @data_len: length of the data the HW placed in @buf
 *
 * Used when the HW did not split the headers itself: copies the packet
 * headers from the start of the payload buffer into the dedicated header
 * buffer, then advances the payload buffer's offset past the copied bytes.
 *
 * Return: number of bytes moved into @hdr, or 0 if nothing was copied
 * (unreadable payload memory or failed CPU sync).
 */
static u32 ixgbevf_rx_hsplit_wa(const struct libeth_fqe *hdr,
				struct libeth_fqe *buf, u32 data_len)
{
	/* Small frames are moved wholesale; otherwise only the Ethernet
	 * header is pulled (presumably the minimum needed for the stack to
	 * begin parsing — TODO confirm against the libeth hsplit contract).
	 */
	u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
	struct page *hdr_page, *buf_page;
	const void *src;
	void *dst;

	/* net_iov-backed payload memory has no kernel mapping to memcpy
	 * from, and an unsynced buffer cannot be read coherently — bail and
	 * report that no headers were split off.
	 */
	if (unlikely(netmem_is_net_iov(buf->netmem)) ||
	    !libeth_rx_sync_for_cpu(buf, copy))
		return 0;

	hdr_page = __netmem_to_page(hdr->netmem);
	buf_page = __netmem_to_page(buf->netmem);

	/* Both addresses account for the page_pool's configured headroom
	 * (pp->p.offset) on top of the per-fqe offset.
	 */
	dst = page_address(hdr_page) + hdr->offset +
	      pp_page_to_nmdesc(hdr_page)->pp->p.offset;
	src = page_address(buf_page) + buf->offset +
	      pp_page_to_nmdesc(buf_page)->pp->p.offset;

	/* Copy a rounded-up, aligned length for efficiency; assumes the
	 * over-read/over-write stays within both buffers' truesize —
	 * NOTE(review): confirm LARGEST_ALIGN slack fits the HDR fqe size.
	 */
	memcpy(dst, src, LARGEST_ALIGN(copy));
	/* Only the logical @copy bytes are consumed from the payload. */
	buf->offset += copy;

	return copy;
}
823+
784824static int ixgbevf_clean_rx_irq (struct ixgbevf_q_vector * q_vector ,
785825 struct ixgbevf_ring * rx_ring ,
786826 int budget )
@@ -818,6 +858,23 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
818858 rmb ();
819859
820860 rx_buffer = & rx_ring -> rx_fqes [rx_ring -> next_to_clean ];
861+
862+ if (unlikely (rx_ring -> hdr_pp )) {
863+ struct libeth_fqe * hdr_buff ;
864+ unsigned int hdr_size = 0 ;
865+
866+ hdr_buff = & rx_ring -> hdr_fqes [rx_ring -> next_to_clean ];
867+
868+ if (!xdp -> data ) {
869+ hdr_size = ixgbevf_rx_hsplit_wa (hdr_buff ,
870+ rx_buffer ,
871+ size );
872+ size -= hdr_size ? : size ;
873+ }
874+
875+ libeth_xdp_process_buff (xdp , hdr_buff , hdr_size );
876+ }
877+
821878 libeth_xdp_process_buff (xdp , rx_buffer , size );
822879
823880 cleaned_count ++ ;
@@ -3054,19 +3111,38 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
30543111 return err ;
30553112}
30563113
3057- /**
3058- * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3059- * @adapter: board private structure
3060- * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3061- *
3062- * Returns 0 on success, negative on failure
3063- **/
3064- int ixgbevf_setup_rx_resources (struct ixgbevf_adapter * adapter ,
3065- struct ixgbevf_ring * rx_ring )
3114+ static void ixgbvf_rx_destroy_pp (struct ixgbevf_ring * rx_ring )
30663115{
3116+ struct libeth_fq fq = {
3117+ .pp = rx_ring -> pp ,
3118+ .fqes = rx_ring -> rx_fqes ,
3119+ };
3120+
3121+ libeth_rx_fq_destroy (& fq );
3122+ rx_ring -> rx_fqes = NULL ;
3123+ rx_ring -> pp = NULL ;
3124+
3125+ if (!rx_ring -> hdr_pp )
3126+ return ;
3127+
3128+ fq = (struct libeth_fq ) {
3129+ .pp = rx_ring -> hdr_pp ,
3130+ .fqes = rx_ring -> hdr_fqes ,
3131+ };
3132+
3133+ libeth_rx_fq_destroy (& fq );
3134+ rx_ring -> hdr_fqes = NULL ;
3135+ rx_ring -> hdr_pp = NULL ;
3136+ }
3137+
3138+ static int ixgbvf_rx_create_pp (struct ixgbevf_ring * rx_ring )
3139+ {
3140+ u32 adapter_flags = rx_ring -> q_vector -> adapter -> flags ;
3141+
30673142 struct libeth_fq fq = {
30683143 .count = rx_ring -> count ,
30693144 .nid = NUMA_NO_NODE ,
3145+ .hsplit = adapter_flags & IXGBEVF_FLAG_HSPLIT ,
30703146 .type = LIBETH_FQE_MTU ,
30713147 .xdp = !!rx_ring -> xdp_prog ,
30723148 .buf_len = IXGBEVF_RX_PAGE_LEN (rx_ring -> xdp_prog ?
@@ -3084,34 +3160,75 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
30843160 rx_ring -> truesize = fq .truesize ;
30853161 rx_ring -> rx_buf_len = fq .buf_len ;
30863162
3163+ if (!fq .hsplit )
3164+ return 0 ;
3165+
3166+ fq = (struct libeth_fq ) {
3167+ .count = rx_ring -> count ,
3168+ .nid = NUMA_NO_NODE ,
3169+ .type = LIBETH_FQE_HDR ,
3170+ .xdp = !!rx_ring -> xdp_prog ,
3171+ };
3172+
3173+ ret = libeth_rx_fq_create (& fq , & rx_ring -> q_vector -> napi );
3174+ if (ret )
3175+ goto err ;
3176+
3177+ rx_ring -> hdr_pp = fq .pp ;
3178+ rx_ring -> hdr_fqes = fq .fqes ;
3179+ rx_ring -> hdr_truesize = fq .truesize ;
3180+ rx_ring -> hdr_buf_len = fq .buf_len ;
3181+
3182+ return 0 ;
3183+
3184+ err :
3185+ ixgbvf_rx_destroy_pp (rx_ring );
3186+ return ret ;
3187+ }
3188+
3189+ /**
3190+ * ixgbevf_setup_rx_resources - allocate Rx resources
3191+ * @adapter: board private structure
3192+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3193+ *
3194+ * Returns: 0 on success, negative on failure.
3195+ **/
3196+ int ixgbevf_setup_rx_resources (struct ixgbevf_adapter * adapter ,
3197+ struct ixgbevf_ring * rx_ring )
3198+ {
3199+ int ret ;
3200+
3201+ ret = ixgbvf_rx_create_pp (rx_ring );
3202+ if (ret )
3203+ return ret ;
3204+
30873205 u64_stats_init (& rx_ring -> syncp );
30883206
30893207 /* Round up to nearest 4K */
30903208 rx_ring -> size = rx_ring -> count * sizeof (union ixgbe_adv_rx_desc );
30913209 rx_ring -> size = ALIGN (rx_ring -> size , 4096 );
30923210
3093- rx_ring -> desc = dma_alloc_coherent (fq . pp -> p .dev , rx_ring -> size ,
3211+ rx_ring -> desc = dma_alloc_coherent (rx_ring -> pp -> p .dev , rx_ring -> size ,
30943212 & rx_ring -> dma , GFP_KERNEL );
30953213
30963214 if (!rx_ring -> desc )
30973215 goto err ;
30983216
30993217 /* XDP RX-queue info */
31003218 ret = __xdp_rxq_info_reg (& rx_ring -> xdp_rxq , adapter -> netdev ,
3101- rx_ring -> queue_index , 0 , fq . buf_len );
3219+ rx_ring -> queue_index , 0 , rx_ring -> rx_buf_len );
31023220 if (ret )
31033221 goto err ;
31043222
3105- xdp_rxq_info_attach_page_pool (& rx_ring -> xdp_rxq , fq . pp );
3223+ xdp_rxq_info_attach_page_pool (& rx_ring -> xdp_rxq , rx_ring -> pp );
31063224
31073225 rx_ring -> xdp_prog = adapter -> xdp_prog ;
31083226
31093227 return 0 ;
31103228err :
3111- libeth_rx_fq_destroy (& fq );
3112- rx_ring -> rx_fqes = NULL ;
3113- rx_ring -> pp = NULL ;
3229+ ixgbvf_rx_destroy_pp (rx_ring );
31143230 dev_err (rx_ring -> dev , "Unable to allocate memory for the Rx descriptor ring\n" );
3231+
31153232 return ret ;
31163233}
31173234
@@ -4222,6 +4339,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
42224339 netdev -> priv_flags |= IFF_UNICAST_FLT ;
42234340 netdev -> xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_RX_SG ;
42244341
4342+ if (adapter -> hw .mac .type == ixgbe_mac_82599_vf )
4343+ adapter -> flags |= IXGBEVF_FLAG_HSPLIT ;
4344+
42254345 /* MTU range: 68 - 1504 or 9710 */
42264346 netdev -> min_mtu = ETH_MIN_MTU ;
42274347 switch (adapter -> hw .api_version ) {
0 commit comments