
Commit cdff031

jahay1 authored and PlaidCat committed
idpf: simplify and fix splitq Tx packet rollback error path
jira KERNEL-168
commit-author Joshua Hay <joshua.a.hay@intel.com>
commit b61dfa9

upstream-diff | adjusted context in 2 places:
- when removing func idpf_tx_dma_map_error, due to a different memset call that uses the hardcoded struct type;
- in func idpf_tx_splitq_frame, due to the missing expected union idpf_flex_tx_ctx_desc *ctx_desc;
Both differences were introduced in commit 1a49cf8 ("idpf: add Tx timestamp flows").

Move (and rename) the existing rollback logic to singleq.c since that will be the only consumer. Create a simplified splitq-specific rollback function to loop through and unmap tx_bufs based on the completion tag. This is critical before replacing the Tx buffer ring with the buffer pool, since the previous rollback indexing will not work to unmap the chained buffers from the pool.

Cache the next_to_use index before any portion of the packet is put on the descriptor ring. In case of an error, the rollback will bump tail to the correct next_to_use value. Because the splitq path now supports different types of context descriptors (and potentially multiple in the future), this will take care of rolling back any and all context descriptors encoded on the ring for the erroneous packet. The previous rollback logic was broken for PTP packets since it would not account for the PTP context descriptor.

Fixes: 1a49cf8 ("idpf: add Tx timestamp flows")
Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
Tested-by: Samuel Salin <Samuel.salin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
(cherry picked from commit b61dfa9)
Signed-off-by: Roxana Nicolescu <rnicolescu@ciq.com>
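For readers outside the driver, the rollback scheme described above boils down to two ideas: every buffer of a packet shares one completion tag, and next_to_use is cached before any part of the packet touches the ring. A minimal standalone sketch of that tag-driven unwind; the ring and buffer types here are simplified hypothetical stand-ins, not the driver's real structures:

#include <stdio.h>

#define RING_SIZE 8

/* Hypothetical stand-in for a Tx buffer: a fake DMA mapping plus the
 * completion tag shared by every buffer of one packet.
 */
struct buf {
	int mapped;
	int tag;
};

/* Unwind a failed packet the way the new splitq rollback does: walk
 * forward from the packet's first buffer, unmapping while the
 * completion tag still matches, then rewind tail to the next_to_use
 * cached before the packet was placed on the ring.
 */
static void rollback(struct buf *ring, unsigned int first, int tag,
		     unsigned int prev_ntu, unsigned int *ntu)
{
	unsigned int i = first;

	do {
		ring[i].mapped = 0;		/* "dma_unmap" */
		i = (i + 1) % RING_SIZE;
	} while (ring[i].tag == tag);

	*ntu = prev_ntu;	/* bumping tail back also abandons any
				 * context descriptors the packet wrote */
}

int main(void)
{
	struct buf ring[RING_SIZE] = { { 0 } };
	unsigned int ntu = 2;
	unsigned int prev_ntu = ntu;	/* cached a la tx_params.prev_ntu */

	/* "Map" a three-buffer packet with completion tag 7, then fail. */
	for (int n = 0; n < 3; n++, ntu = (ntu + 1) % RING_SIZE)
		ring[ntu] = (struct buf){ .mapped = 1, .tag = 7 };

	rollback(ring, prev_ntu, 7, prev_ntu, &ntu);
	printf("next_to_use restored to %u\n", ntu);	/* prints 2 */
	return 0;
}

Note how the do/while always completes at least the first buffer, and how the context descriptors need no explicit clearing: resetting tail to the cached index is enough, which is what fixes the PTP case.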
1 parent a8bb03f commit cdff031

3 files changed

Lines changed: 95 additions & 58 deletions


drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c

Lines changed: 55 additions & 2 deletions
@@ -178,6 +178,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
 	return 1;
 }
 
+/**
+ * idpf_tx_singleq_dma_map_error - handle TX DMA map errors
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ * @first: original first buffer info buffer for packet
+ * @idx: starting point on ring to unwind
+ */
+static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
+					  struct sk_buff *skb,
+					  struct idpf_tx_buf *first, u16 idx)
+{
+	struct libeth_sq_napi_stats ss = { };
+	struct libeth_cq_pp cp = {
+		.dev = txq->dev,
+		.ss = &ss,
+	};
+
+	u64_stats_update_begin(&txq->stats_sync);
+	u64_stats_inc(&txq->q_stats.dma_map_errs);
+	u64_stats_update_end(&txq->stats_sync);
+
+	/* clear dma mappings for failed tx_buf map */
+	for (;;) {
+		struct idpf_tx_buf *tx_buf;
+
+		tx_buf = &txq->tx_buf[idx];
+		libeth_tx_complete(tx_buf, &cp);
+		if (tx_buf == first)
+			break;
+		if (idx == 0)
+			idx = txq->desc_count;
+		idx--;
+	}
+
+	if (skb_is_gso(skb)) {
+		union idpf_tx_flex_desc *tx_desc;
+
+		/* If we failed a DMA mapping for a TSO packet, we will have
+		 * used one additional descriptor for a context
+		 * descriptor. Reset that here.
+		 */
+		tx_desc = &txq->flex_tx[idx];
+		memset(tx_desc, 0, sizeof(*tx_desc));
+		if (idx == 0)
+			idx = txq->desc_count;
+		idx--;
+	}
+
+	/* Update tail in case netdev_xmit_more was previously true */
+	idpf_tx_buf_hw_update(txq, idx, false);
+}
+
 /**
  * idpf_tx_singleq_map - Build the Tx base descriptor
  * @tx_q: queue to send buffer on
@@ -218,8 +270,9 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
 		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
 
-		if (dma_mapping_error(tx_q->dev, dma))
-			return idpf_tx_dma_map_error(tx_q, skb, first, i);
+		if (unlikely(dma_mapping_error(tx_q->dev, dma)))
+			return idpf_tx_singleq_dma_map_error(tx_q, skb,
+							     first, i);
 
 		/* record length, and DMA address */
 		dma_unmap_len_set(tx_buf, len, size);
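One detail worth noting in the unwind loop above: the pair "if (idx == 0) idx = txq->desc_count; idx--;" is a branch-based ring-index decrement with wraparound. A tiny standalone check of the equivalent modular form, for illustration only:

#include <assert.h>

/* Ring-index decrement with wraparound, as written in the loop above. */
static unsigned int ring_dec(unsigned int idx, unsigned int count)
{
	if (idx == 0)
		idx = count;
	return idx - 1;
}

int main(void)
{
	/* Same result as the modular form (idx + count - 1) % count. */
	for (unsigned int idx = 0; idx < 64; idx++)
		assert(ring_dec(idx, 64) == (idx + 64 - 1) % 64);
	return 0;
}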

drivers/net/ethernet/intel/idpf/idpf_txrx.c

Lines changed: 37 additions & 54 deletions
@@ -2285,57 +2285,6 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
 	return count;
 }
 
-/**
- * idpf_tx_dma_map_error - handle TX DMA map errors
- * @txq: queue to send buffer on
- * @skb: send buffer
- * @first: original first buffer info buffer for packet
- * @idx: starting point on ring to unwind
- */
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
-			   struct idpf_tx_buf *first, u16 idx)
-{
-	struct libeth_sq_napi_stats ss = { };
-	struct libeth_cq_pp cp = {
-		.dev = txq->dev,
-		.ss = &ss,
-	};
-
-	u64_stats_update_begin(&txq->stats_sync);
-	u64_stats_inc(&txq->q_stats.dma_map_errs);
-	u64_stats_update_end(&txq->stats_sync);
-
-	/* clear dma mappings for failed tx_buf map */
-	for (;;) {
-		struct idpf_tx_buf *tx_buf;
-
-		tx_buf = &txq->tx_buf[idx];
-		libeth_tx_complete(tx_buf, &cp);
-		if (tx_buf == first)
-			break;
-		if (idx == 0)
-			idx = txq->desc_count;
-		idx--;
-	}
-
-	if (skb_is_gso(skb)) {
-		union idpf_tx_flex_desc *tx_desc;
-
-		/* If we failed a DMA mapping for a TSO packet, we will have
-		 * used one additional descriptor for a context
-		 * descriptor. Reset that here.
-		 */
-		tx_desc = &txq->flex_tx[idx];
-		memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
-		if (idx == 0)
-			idx = txq->desc_count;
-		idx--;
-	}
-
-	/* Update tail in case netdev_xmit_more was previously true */
-	idpf_tx_buf_hw_update(txq, idx, false);
-}
-
 /**
  * idpf_tx_splitq_bump_ntu - adjust NTU and generation
  * @txq: the tx ring to wrap
@@ -2384,6 +2333,37 @@ static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
 	return true;
 }
 
+/**
+ * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
+ * @txq: Tx queue to unwind
+ * @params: pointer to splitq params struct
+ * @first: starting buffer for packet to unmap
+ */
+static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq,
+					 struct idpf_tx_splitq_params *params,
+					 struct idpf_tx_buf *first)
+{
+	struct libeth_sq_napi_stats ss = { };
+	struct idpf_tx_buf *tx_buf = first;
+	struct libeth_cq_pp cp = {
+		.dev = txq->dev,
+		.ss = &ss,
+	};
+	u32 idx = 0;
+
+	u64_stats_update_begin(&txq->stats_sync);
+	u64_stats_inc(&txq->q_stats.dma_map_errs);
+	u64_stats_update_end(&txq->stats_sync);
+
+	do {
+		libeth_tx_complete(tx_buf, &cp);
+		idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+	} while (idpf_tx_buf_compl_tag(tx_buf) == params->compl_tag);
+
+	/* Update tail in case netdev_xmit_more was previously true. */
+	idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
+}
+
 /**
  * idpf_tx_splitq_map - Build the Tx flex descriptor
  * @tx_q: queue to send buffer on
@@ -2428,8 +2408,9 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
 		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
 
-		if (dma_mapping_error(tx_q->dev, dma))
-			return idpf_tx_dma_map_error(tx_q, skb, first, i);
+		if (unlikely(dma_mapping_error(tx_q->dev, dma)))
+			return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+							    first);
 
 		first->nr_frags++;
 		idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
@@ -2818,7 +2799,9 @@ static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
 static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
 					struct idpf_tx_queue *tx_q)
 {
-	struct idpf_tx_splitq_params tx_params = { };
+	struct idpf_tx_splitq_params tx_params = {
+		.prev_ntu = tx_q->next_to_use,
+	};
 	struct idpf_tx_buf *first;
 	unsigned int count;
 	int tso;
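Both map paths also swap the plain DMA-error check for unlikely(), the kernel's branch-prediction hint. Simplified from the actual definition in <linux/compiler.h> (the real macro can additionally hook the branch-profiling infrastructure):

/* Tell the compiler the condition is expected to be false, so the
 * error path is laid out off the hot fall-through path.
 */
#define unlikely(x)	__builtin_expect(!!(x), 0)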

drivers/net/ethernet/intel/idpf/idpf_txrx.h

Lines changed: 3 additions & 2 deletions
@@ -200,6 +200,7 @@ struct idpf_tx_offload_params {
  * @compl_tag: Associated tag for completion
  * @td_tag: Descriptor tunneling tag
  * @offload: Offload parameters
+ * @prev_ntu: stored TxQ next_to_use in case of rollback
  */
 struct idpf_tx_splitq_params {
 	enum idpf_tx_desc_dtype_value dtype;
@@ -210,6 +211,8 @@ struct idpf_tx_splitq_params {
 	};
 
 	struct idpf_tx_offload_params offload;
+
+	u16 prev_ntu;
 };
 
 enum idpf_tx_ctx_desc_eipt_offload {
@@ -1131,8 +1134,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
 			   bool xmit_more);
 unsigned int idpf_size_to_txd_count(unsigned int size);
 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
-			   struct idpf_tx_buf *first, u16 ring_idx);
 unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
 					 struct sk_buff *skb);
 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);