Skip to content

Commit e6bc7c2

Browse files
jahay1roxanan1996
authored and committed
idpf: improve when to set RE bit logic
jira KERNEL-170 commit-author Joshua Hay <joshua.a.hay@intel.com> commit f2d18e1 Track the gap between next_to_use and the last RE index. Set RE again if the gap is large enough to ensure RE bit is set frequently. This is critical before removing the stashing mechanisms because the opportunistic descriptor ring cleaning from the out-of-order completions will go away. Previously the descriptors would be "cleaned" by both the descriptor (RE) completion and the out-of-order completions. Without the latter, we must ensure the RE bit is set more frequently. Otherwise, it's theoretically possible for the descriptor ring next_to_clean to never advance. The previous implementation was dependent on the start of a packet falling on a 64th index in the descriptor ring, which is not guaranteed with large packets. Signed-off-by: Luigi Rizzo <lrizzo@google.com> Signed-off-by: Brian Vazquez <brianvv@google.com> Signed-off-by: Joshua Hay <joshua.a.hay@intel.com> Reviewed-by: Madhu Chittim <madhu.chittim@intel.com> Tested-by: Samuel Salin <Samuel.salin@intel.com> Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com> (cherry picked from commit f2d18e1) Signed-off-by: Roxana Nicolescu <rnicolescu@ciq.com>
1 parent 6dd3f1c commit e6bc7c2

2 files changed

Lines changed: 23 additions & 3 deletions

File tree

drivers/net/ethernet/intel/idpf/idpf_txrx.c

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -293,6 +293,8 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
293293
*/
294294
idpf_queue_change(GEN_CHK, refillq);
295295

296+
tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP;
297+
296298
return 0;
297299

298300
err_alloc:
@@ -2793,6 +2795,21 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
27932795
return NETDEV_TX_OK;
27942796
}
27952797

2798+
/**
2799+
* idpf_tx_splitq_need_re - check whether RE bit needs to be set
2800+
* @tx_q: pointer to Tx queue
2801+
*
2802+
* Return: true if RE bit needs to be set, false otherwise
2803+
*/
2804+
static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
2805+
{
2806+
int gap = tx_q->next_to_use - tx_q->last_re;
2807+
2808+
gap += (gap < 0) ? tx_q->desc_count : 0;
2809+
2810+
return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP;
2811+
}
2812+
27962813
/**
27972814
* idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
27982815
* @skb: send buffer
@@ -2873,9 +2890,10 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
28732890
* MIN_RING size to ensure it will be set at least once each
28742891
* time around the ring.
28752892
*/
2876-
if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
2893+
if (idpf_tx_splitq_need_re(tx_q)) {
28772894
tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
28782895
tx_q->txq_grp->num_completions_pending++;
2896+
tx_q->last_re = tx_q->next_to_use;
28792897
}
28802898

28812899
if (skb->ip_summed == CHECKSUM_PARTIAL)

drivers/net/ethernet/intel/idpf/idpf_txrx.h

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -604,6 +604,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
604604
* @netdev: &net_device corresponding to this queue
605605
* @next_to_use: Next descriptor to use
606606
* @next_to_clean: Next descriptor to clean
607+
* @last_re: last descriptor index that RE bit was set
608+
* @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
607609
* @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
608610
* the TX completion queue, it can be for any TXQ associated
609611
* with that completion queue. This means we can clean up to
@@ -614,7 +616,6 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
614616
* only once at the end of the cleaning routine.
615617
* @clean_budget: singleq only, queue cleaning budget
616618
* @cleaned_pkts: Number of packets cleaned for the above said case
617-
* @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
618619
* @stash: Tx buffer stash for Flow-based scheduling mode
619620
* @refillq: Pointer to refill queue
620621
* @compl_tag_bufid_m: Completion tag buffer id mask
@@ -655,14 +656,15 @@ struct idpf_tx_queue {
655656
__cacheline_group_begin_aligned(read_write);
656657
u16 next_to_use;
657658
u16 next_to_clean;
659+
u16 last_re;
660+
u16 tx_max_bufs;
658661

659662
union {
660663
u32 cleaned_bytes;
661664
u32 clean_budget;
662665
};
663666
u16 cleaned_pkts;
664667

665-
u16 tx_max_bufs;
666668
struct idpf_txq_stash *stash;
667669
struct idpf_sw_queue *refillq;
668670

0 commit comments

Comments
 (0)