Skip to content

Commit cef58d2

Browse files
arndb authored and gregkh committed
gve: DQO: avoid unused variable warnings
[ Upstream commit 1e0083b ]

The use of dma_unmap_addr()/dma_unmap_len() in the driver causes multiple
warnings when these macros are defined as empty, e.g. in an ARCH=i386
allmodconfig build:

drivers/net/ethernet/google/gve/gve_tx_dqo.c: In function 'gve_tx_add_skb_no_copy_dqo':
drivers/net/ethernet/google/gve/gve_tx_dqo.c:494:40: error: unused variable 'buf' [-Werror=unused-variable]
  494 |                 struct gve_tx_dma_buf *buf =

This is not how the NEED_DMA_MAP_STATE macros are meant to work, as they
rely on never using local variables or a temporary structure like
gve_tx_dma_buf.

Remove the gve_tx_dma_buf definition and open-code the contents in all
places to avoid the warning. This causes some rather long lines but
otherwise ends up making the driver slightly smaller.

Fixes: a57e5de ("gve: DQO: Add TX path")
Link: https://lore.kernel.org/netdev/20210723231957.1113800-1-bcf@google.com/
Link: https://lore.kernel.org/netdev/20210721151100.2042139-1-arnd@kernel.org/
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 9b955d5 commit cef58d2

3 files changed

Lines changed: 54 additions & 66 deletions

File tree

drivers/net/ethernet/google/gve/gve.h

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -224,19 +224,17 @@ struct gve_tx_iovec {
224224
u32 iov_padding; /* padding associated with this segment */
225225
};
226226

227-
struct gve_tx_dma_buf {
228-
DEFINE_DMA_UNMAP_ADDR(dma);
229-
DEFINE_DMA_UNMAP_LEN(len);
230-
};
231-
232227
/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
233228
* ring entry but only used for a pkt_desc not a seg_desc
234229
*/
235230
struct gve_tx_buffer_state {
236231
struct sk_buff *skb; /* skb for this pkt */
237232
union {
238233
struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
239-
struct gve_tx_dma_buf buf;
234+
struct {
235+
DEFINE_DMA_UNMAP_ADDR(dma);
236+
DEFINE_DMA_UNMAP_LEN(len);
237+
};
240238
};
241239
};
242240

@@ -280,7 +278,8 @@ struct gve_tx_pending_packet_dqo {
280278
* All others correspond to `skb`'s frags and should be unmapped with
281279
* `dma_unmap_page`.
282280
*/
283-
struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1];
281+
DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
282+
DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
284283
u16 num_bufs;
285284

286285
/* Linked list index to next element in the list, or -1 if none */

drivers/net/ethernet/google/gve/gve_tx.c

Lines changed: 10 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -303,15 +303,15 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
303303
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
304304
{
305305
if (info->skb) {
306-
dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma),
307-
dma_unmap_len(&info->buf, len),
306+
dma_unmap_single(dev, dma_unmap_addr(info, dma),
307+
dma_unmap_len(info, len),
308308
DMA_TO_DEVICE);
309-
dma_unmap_len_set(&info->buf, len, 0);
309+
dma_unmap_len_set(info, len, 0);
310310
} else {
311-
dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma),
312-
dma_unmap_len(&info->buf, len),
311+
dma_unmap_page(dev, dma_unmap_addr(info, dma),
312+
dma_unmap_len(info, len),
313313
DMA_TO_DEVICE);
314-
dma_unmap_len_set(&info->buf, len, 0);
314+
dma_unmap_len_set(info, len, 0);
315315
}
316316
}
317317

@@ -491,7 +491,6 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
491491
struct gve_tx_buffer_state *info;
492492
bool is_gso = skb_is_gso(skb);
493493
u32 idx = tx->req & tx->mask;
494-
struct gve_tx_dma_buf *buf;
495494
u64 addr;
496495
u32 len;
497496
int i;
@@ -515,9 +514,8 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
515514
tx->dma_mapping_error++;
516515
goto drop;
517516
}
518-
buf = &info->buf;
519-
dma_unmap_len_set(buf, len, len);
520-
dma_unmap_addr_set(buf, dma, addr);
517+
dma_unmap_len_set(info, len, len);
518+
dma_unmap_addr_set(info, dma, addr);
521519

522520
payload_nfrags = shinfo->nr_frags;
523521
if (hlen < len) {
@@ -549,10 +547,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
549547
tx->dma_mapping_error++;
550548
goto unmap_drop;
551549
}
552-
buf = &tx->info[idx].buf;
553550
tx->info[idx].skb = NULL;
554-
dma_unmap_len_set(buf, len, len);
555-
dma_unmap_addr_set(buf, dma, addr);
551+
dma_unmap_len_set(&tx->info[idx], len, len);
552+
dma_unmap_addr_set(&tx->info[idx], dma, addr);
556553

557554
gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
558555
}

drivers/net/ethernet/google/gve/gve_tx_dqo.c

Lines changed: 38 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -85,18 +85,16 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
8585
int j;
8686

8787
for (j = 0; j < cur_state->num_bufs; j++) {
88-
struct gve_tx_dma_buf *buf = &cur_state->bufs[j];
89-
9088
if (j == 0) {
9189
dma_unmap_single(tx->dev,
92-
dma_unmap_addr(buf, dma),
93-
dma_unmap_len(buf, len),
94-
DMA_TO_DEVICE);
90+
dma_unmap_addr(cur_state, dma[j]),
91+
dma_unmap_len(cur_state, len[j]),
92+
DMA_TO_DEVICE);
9593
} else {
9694
dma_unmap_page(tx->dev,
97-
dma_unmap_addr(buf, dma),
98-
dma_unmap_len(buf, len),
99-
DMA_TO_DEVICE);
95+
dma_unmap_addr(cur_state, dma[j]),
96+
dma_unmap_len(cur_state, len[j]),
97+
DMA_TO_DEVICE);
10098
}
10199
}
102100
if (cur_state->skb) {
@@ -457,15 +455,15 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
457455
const bool is_gso = skb_is_gso(skb);
458456
u32 desc_idx = tx->dqo_tx.tail;
459457

460-
struct gve_tx_pending_packet_dqo *pending_packet;
458+
struct gve_tx_pending_packet_dqo *pkt;
461459
struct gve_tx_metadata_dqo metadata;
462460
s16 completion_tag;
463461
int i;
464462

465-
pending_packet = gve_alloc_pending_packet(tx);
466-
pending_packet->skb = skb;
467-
pending_packet->num_bufs = 0;
468-
completion_tag = pending_packet - tx->dqo.pending_packets;
463+
pkt = gve_alloc_pending_packet(tx);
464+
pkt->skb = skb;
465+
pkt->num_bufs = 0;
466+
completion_tag = pkt - tx->dqo.pending_packets;
469467

470468
gve_extract_tx_metadata_dqo(skb, &metadata);
471469
if (is_gso) {
@@ -493,27 +491,23 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
493491

494492
/* Map the linear portion of skb */
495493
{
496-
struct gve_tx_dma_buf *buf =
497-
&pending_packet->bufs[pending_packet->num_bufs];
498494
u32 len = skb_headlen(skb);
499495
dma_addr_t addr;
500496

501497
addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
502498
if (unlikely(dma_mapping_error(tx->dev, addr)))
503499
goto err;
504500

505-
dma_unmap_len_set(buf, len, len);
506-
dma_unmap_addr_set(buf, dma, addr);
507-
++pending_packet->num_bufs;
501+
dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
502+
dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
503+
++pkt->num_bufs;
508504

509505
gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
510506
completion_tag,
511507
/*eop=*/shinfo->nr_frags == 0, is_gso);
512508
}
513509

514510
for (i = 0; i < shinfo->nr_frags; i++) {
515-
struct gve_tx_dma_buf *buf =
516-
&pending_packet->bufs[pending_packet->num_bufs];
517511
const skb_frag_t *frag = &shinfo->frags[i];
518512
bool is_eop = i == (shinfo->nr_frags - 1);
519513
u32 len = skb_frag_size(frag);
@@ -523,9 +517,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
523517
if (unlikely(dma_mapping_error(tx->dev, addr)))
524518
goto err;
525519

526-
dma_unmap_len_set(buf, len, len);
527-
dma_unmap_addr_set(buf, dma, addr);
528-
++pending_packet->num_bufs;
520+
dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
521+
dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
522+
++pkt->num_bufs;
529523

530524
gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
531525
completion_tag, is_eop, is_gso);
@@ -552,22 +546,23 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
552546
return 0;
553547

554548
err:
555-
for (i = 0; i < pending_packet->num_bufs; i++) {
556-
struct gve_tx_dma_buf *buf = &pending_packet->bufs[i];
557-
549+
for (i = 0; i < pkt->num_bufs; i++) {
558550
if (i == 0) {
559-
dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma),
560-
dma_unmap_len(buf, len),
551+
dma_unmap_single(tx->dev,
552+
dma_unmap_addr(pkt, dma[i]),
553+
dma_unmap_len(pkt, len[i]),
561554
DMA_TO_DEVICE);
562555
} else {
563-
dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma),
564-
dma_unmap_len(buf, len), DMA_TO_DEVICE);
556+
dma_unmap_page(tx->dev,
557+
dma_unmap_addr(pkt, dma[i]),
558+
dma_unmap_len(pkt, len[i]),
559+
DMA_TO_DEVICE);
565560
}
566561
}
567562

568-
pending_packet->skb = NULL;
569-
pending_packet->num_bufs = 0;
570-
gve_free_pending_packet(tx, pending_packet);
563+
pkt->skb = NULL;
564+
pkt->num_bufs = 0;
565+
gve_free_pending_packet(tx, pkt);
571566

572567
return -1;
573568
}
@@ -725,12 +720,12 @@ static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
725720

726721
static void remove_from_list(struct gve_tx_ring *tx,
727722
struct gve_index_list *list,
728-
struct gve_tx_pending_packet_dqo *pending_packet)
723+
struct gve_tx_pending_packet_dqo *pkt)
729724
{
730725
s16 prev_index, next_index;
731726

732-
prev_index = pending_packet->prev;
733-
next_index = pending_packet->next;
727+
prev_index = pkt->prev;
728+
next_index = pkt->next;
734729

735730
if (prev_index == -1) {
736731
/* Node is head */
@@ -747,21 +742,18 @@ static void remove_from_list(struct gve_tx_ring *tx,
747742
}
748743

749744
static void gve_unmap_packet(struct device *dev,
750-
struct gve_tx_pending_packet_dqo *pending_packet)
745+
struct gve_tx_pending_packet_dqo *pkt)
751746
{
752-
struct gve_tx_dma_buf *buf;
753747
int i;
754748

755749
/* SKB linear portion is guaranteed to be mapped */
756-
buf = &pending_packet->bufs[0];
757-
dma_unmap_single(dev, dma_unmap_addr(buf, dma),
758-
dma_unmap_len(buf, len), DMA_TO_DEVICE);
759-
for (i = 1; i < pending_packet->num_bufs; i++) {
760-
buf = &pending_packet->bufs[i];
761-
dma_unmap_page(dev, dma_unmap_addr(buf, dma),
762-
dma_unmap_len(buf, len), DMA_TO_DEVICE);
750+
dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
751+
dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
752+
for (i = 1; i < pkt->num_bufs; i++) {
753+
dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
754+
dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
763755
}
764-
pending_packet->num_bufs = 0;
756+
pkt->num_bufs = 0;
765757
}
766758

767759
/* Completion types and expected behavior:

0 commit comments

Comments
 (0)