Skip to content

Commit 44159dd

Browse files
jahay1roxanan1996
authored and committed
idpf: simplify and fix splitq Tx packet rollback error path
jira KERNEL-170 commit-author Joshua Hay <joshua.a.hay@intel.com> commit b61dfa9 upstream-diff | adjusted context in 2 places: - when removing func idpf_tx_dma_map_error due to different memset call that uses the hardcoded struct type; - in func idpf_tx_splitq_frame due to missing expected union idpf_flex_tx_ctx_desc *ctx_desc; both differences were introduced in commit 1a49cf8 ("idpf: add Tx timestamp flows"). Move (and rename) the existing rollback logic to singleq.c since that will be the only consumer. Create a simplified splitq specific rollback function to loop through and unmap tx_bufs based on the completion tag. This is critical before replacing the Tx buffer ring with the buffer pool since the previous rollback indexing will not work to unmap the chained buffers from the pool. Cache the next_to_use index before any portion of the packet is put on the descriptor ring. In case of an error, the rollback will bump tail to the correct next_to_use value. Because the splitq path now supports different types of context descriptors (and potentially multiple in the future), this will take care of rolling back any and all context descriptors encoded on the ring for the erroneous packet. The previous rollback logic was broken for PTP packets since it would not account for the PTP context descriptor. Fixes: 1a49cf8 ("idpf: add Tx timestamp flows") Signed-off-by: Joshua Hay <joshua.a.hay@intel.com> Reviewed-by: Madhu Chittim <madhu.chittim@intel.com> Tested-by: Samuel Salin <Samuel.salin@intel.com> Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com> (cherry picked from commit b61dfa9) Signed-off-by: Roxana Nicolescu <rnicolescu@ciq.com>
1 parent e6bc7c2 commit 44159dd

3 files changed

Lines changed: 95 additions & 58 deletions

File tree

drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c

Lines changed: 55 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -179,6 +179,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
179179
return 1;
180180
}
181181

182+
/**
183+
* idpf_tx_singleq_dma_map_error - handle TX DMA map errors
184+
* @txq: queue to send buffer on
185+
* @skb: send buffer
186+
* @first: original first buffer info buffer for packet
187+
* @idx: starting point on ring to unwind
188+
*/
189+
static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
190+
struct sk_buff *skb,
191+
struct idpf_tx_buf *first, u16 idx)
192+
{
193+
struct libeth_sq_napi_stats ss = { };
194+
struct libeth_cq_pp cp = {
195+
.dev = txq->dev,
196+
.ss = &ss,
197+
};
198+
199+
u64_stats_update_begin(&txq->stats_sync);
200+
u64_stats_inc(&txq->q_stats.dma_map_errs);
201+
u64_stats_update_end(&txq->stats_sync);
202+
203+
/* clear dma mappings for failed tx_buf map */
204+
for (;;) {
205+
struct idpf_tx_buf *tx_buf;
206+
207+
tx_buf = &txq->tx_buf[idx];
208+
libeth_tx_complete(tx_buf, &cp);
209+
if (tx_buf == first)
210+
break;
211+
if (idx == 0)
212+
idx = txq->desc_count;
213+
idx--;
214+
}
215+
216+
if (skb_is_gso(skb)) {
217+
union idpf_tx_flex_desc *tx_desc;
218+
219+
/* If we failed a DMA mapping for a TSO packet, we will have
220+
* used one additional descriptor for a context
221+
* descriptor. Reset that here.
222+
*/
223+
tx_desc = &txq->flex_tx[idx];
224+
memset(tx_desc, 0, sizeof(*tx_desc));
225+
if (idx == 0)
226+
idx = txq->desc_count;
227+
idx--;
228+
}
229+
230+
/* Update tail in case netdev_xmit_more was previously true */
231+
idpf_tx_buf_hw_update(txq, idx, false);
232+
}
233+
182234
/**
183235
* idpf_tx_singleq_map - Build the Tx base descriptor
184236
* @tx_q: queue to send buffer on
@@ -219,8 +271,9 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
219271
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
220272
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
221273

222-
if (dma_mapping_error(tx_q->dev, dma))
223-
return idpf_tx_dma_map_error(tx_q, skb, first, i);
274+
if (unlikely(dma_mapping_error(tx_q->dev, dma)))
275+
return idpf_tx_singleq_dma_map_error(tx_q, skb,
276+
first, i);
224277

225278
/* record length, and DMA address */
226279
dma_unmap_len_set(tx_buf, len, size);

drivers/net/ethernet/intel/idpf/idpf_txrx.c

Lines changed: 37 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -2287,57 +2287,6 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
22872287
return count;
22882288
}
22892289

2290-
/**
2291-
* idpf_tx_dma_map_error - handle TX DMA map errors
2292-
* @txq: queue to send buffer on
2293-
* @skb: send buffer
2294-
* @first: original first buffer info buffer for packet
2295-
* @idx: starting point on ring to unwind
2296-
*/
2297-
void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
2298-
struct idpf_tx_buf *first, u16 idx)
2299-
{
2300-
struct libeth_sq_napi_stats ss = { };
2301-
struct libeth_cq_pp cp = {
2302-
.dev = txq->dev,
2303-
.ss = &ss,
2304-
};
2305-
2306-
u64_stats_update_begin(&txq->stats_sync);
2307-
u64_stats_inc(&txq->q_stats.dma_map_errs);
2308-
u64_stats_update_end(&txq->stats_sync);
2309-
2310-
/* clear dma mappings for failed tx_buf map */
2311-
for (;;) {
2312-
struct idpf_tx_buf *tx_buf;
2313-
2314-
tx_buf = &txq->tx_buf[idx];
2315-
libeth_tx_complete(tx_buf, &cp);
2316-
if (tx_buf == first)
2317-
break;
2318-
if (idx == 0)
2319-
idx = txq->desc_count;
2320-
idx--;
2321-
}
2322-
2323-
if (skb_is_gso(skb)) {
2324-
union idpf_tx_flex_desc *tx_desc;
2325-
2326-
/* If we failed a DMA mapping for a TSO packet, we will have
2327-
* used one additional descriptor for a context
2328-
* descriptor. Reset that here.
2329-
*/
2330-
tx_desc = &txq->flex_tx[idx];
2331-
memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
2332-
if (idx == 0)
2333-
idx = txq->desc_count;
2334-
idx--;
2335-
}
2336-
2337-
/* Update tail in case netdev_xmit_more was previously true */
2338-
idpf_tx_buf_hw_update(txq, idx, false);
2339-
}
2340-
23412290
/**
23422291
* idpf_tx_splitq_bump_ntu - adjust NTU and generation
23432292
* @txq: the tx ring to wrap
@@ -2386,6 +2335,37 @@ static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
23862335
return true;
23872336
}
23882337

2338+
/**
2339+
* idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
2340+
* @txq: Tx queue to unwind
2341+
* @params: pointer to splitq params struct
2342+
* @first: starting buffer for packet to unmap
2343+
*/
2344+
static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq,
2345+
struct idpf_tx_splitq_params *params,
2346+
struct idpf_tx_buf *first)
2347+
{
2348+
struct libeth_sq_napi_stats ss = { };
2349+
struct idpf_tx_buf *tx_buf = first;
2350+
struct libeth_cq_pp cp = {
2351+
.dev = txq->dev,
2352+
.ss = &ss,
2353+
};
2354+
u32 idx = 0;
2355+
2356+
u64_stats_update_begin(&txq->stats_sync);
2357+
u64_stats_inc(&txq->q_stats.dma_map_errs);
2358+
u64_stats_update_end(&txq->stats_sync);
2359+
2360+
do {
2361+
libeth_tx_complete(tx_buf, &cp);
2362+
idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
2363+
} while (idpf_tx_buf_compl_tag(tx_buf) == params->compl_tag);
2364+
2365+
/* Update tail in case netdev_xmit_more was previously true. */
2366+
idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
2367+
}
2368+
23892369
/**
23902370
* idpf_tx_splitq_map - Build the Tx flex descriptor
23912371
* @tx_q: queue to send buffer on
@@ -2430,8 +2410,9 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
24302410
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
24312411
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
24322412

2433-
if (dma_mapping_error(tx_q->dev, dma))
2434-
return idpf_tx_dma_map_error(tx_q, skb, first, i);
2413+
if (unlikely(dma_mapping_error(tx_q->dev, dma)))
2414+
return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
2415+
first);
24352416

24362417
first->nr_frags++;
24372418
idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
@@ -2820,7 +2801,9 @@ static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
28202801
static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
28212802
struct idpf_tx_queue *tx_q)
28222803
{
2823-
struct idpf_tx_splitq_params tx_params = { };
2804+
struct idpf_tx_splitq_params tx_params = {
2805+
.prev_ntu = tx_q->next_to_use,
2806+
};
28242807
struct idpf_tx_buf *first;
28252808
unsigned int count;
28262809
int tso;

drivers/net/ethernet/intel/idpf/idpf_txrx.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -194,6 +194,7 @@ struct idpf_tx_offload_params {
194194
* @compl_tag: Associated tag for completion
195195
* @td_tag: Descriptor tunneling tag
196196
* @offload: Offload parameters
197+
* @prev_ntu: stored TxQ next_to_use in case of rollback
197198
*/
198199
struct idpf_tx_splitq_params {
199200
enum idpf_tx_desc_dtype_value dtype;
@@ -204,6 +205,8 @@ struct idpf_tx_splitq_params {
204205
};
205206

206207
struct idpf_tx_offload_params offload;
208+
209+
u16 prev_ntu;
207210
};
208211

209212
enum idpf_tx_ctx_desc_eipt_offload {
@@ -1031,8 +1034,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
10311034
bool xmit_more);
10321035
unsigned int idpf_size_to_txd_count(unsigned int size);
10331036
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
1034-
void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
1035-
struct idpf_tx_buf *first, u16 ring_idx);
10361037
unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
10371038
struct sk_buff *skb);
10381039
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);

0 commit comments

Comments
 (0)