
Commit e344e97

Sergei Shtylyov authored and gregkh committed
sh_eth: unmap DMA buffers when freeing rings
[ Upstream commit 1debdc8 ]

The DMA API debugging (when enabled) causes:

WARNING: CPU: 0 PID: 1445 at lib/dma-debug.c:519 add_dma_entry+0xe0/0x12c
DMA-API: exceeded 7 overlapping mappings of cacheline 0x01b2974d

to be printed after repeated initialization of the Ether device, e.g.
suspend/resume or 'ifconfig' up/down. This is because DMA buffers mapped
using dma_map_single() in sh_eth_ring_format() and sh_eth_start_xmit()
are never unmapped.

Resolve this problem by unmapping the buffers when freeing the descriptor
rings; in order to do it right, we'd have to add an extra parameter to
sh_eth_txfree() (we rename this function to sh_eth_tx_free(), while at it).

Based on the commit a47b70e ("ravb: unmap descriptors when freeing rings").

Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
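
For background on the warning quoted above: the streaming-DMA contract requires that every dma_map_single() eventually be paired with a dma_unmap_single() of the same handle, size, and direction; dma-debug tracks live mappings per cacheline and flags the accumulation when a driver leaks them. A minimal sketch of the required pattern, using a hypothetical helper (the names example_dma_cycle, dev, buf, and len are placeholders, not sh_eth code):

#include <linux/dma-mapping.h>

/* Illustrative only: map a buffer for a device, use it, unmap it. */
static int example_dma_cycle(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... hand 'addr' to the hardware, wait for completion ... */

	/* Skipping this unmap is precisely the bug being fixed here: each
	 * re-initialization then stacks another live mapping on the same
	 * cachelines until dma-debug warns. */
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
	return 0;
}
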
1 parent b4580d6 commit e344e97

1 file changed: 67 additions & 55 deletions

drivers/net/ethernet/renesas/sh_eth.c
@@ -1059,12 +1059,70 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = sh_get_mdio,
 };
 
+/* free Tx skb function */
+static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	struct sh_eth_txdesc *txdesc;
+	int free_num = 0;
+	int entry;
+	bool sent;
+
+	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+		entry = mdp->dirty_tx % mdp->num_tx_ring;
+		txdesc = &mdp->tx_ring[entry];
+		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
+		if (sent_only && !sent)
+			break;
+		/* TACT bit must be checked before all the following reads */
+		dma_rmb();
+		netif_info(mdp, tx_done, ndev,
+			   "tx entry %d status 0x%08x\n",
+			   entry, le32_to_cpu(txdesc->status));
+		/* Free the original skb. */
+		if (mdp->tx_skbuff[entry]) {
+			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+					 le32_to_cpu(txdesc->len) >> 16,
+					 DMA_TO_DEVICE);
+			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+			mdp->tx_skbuff[entry] = NULL;
+			free_num++;
+		}
+		txdesc->status = cpu_to_le32(TD_TFP);
+		if (entry >= mdp->num_tx_ring - 1)
+			txdesc->status |= cpu_to_le32(TD_TDLE);
+
+		if (sent) {
+			ndev->stats.tx_packets++;
+			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+		}
+	}
+	return free_num;
+}
+
 /* free skb and descriptor buffer */
 static void sh_eth_ring_free(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	int ringsize, i;
 
+	if (mdp->rx_ring) {
+		for (i = 0; i < mdp->num_rx_ring; i++) {
+			if (mdp->rx_skbuff[i]) {
+				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
+
+				dma_unmap_single(&ndev->dev,
+						 le32_to_cpu(rxdesc->addr),
+						 ALIGN(mdp->rx_buf_sz, 32),
+						 DMA_FROM_DEVICE);
+			}
+		}
+		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+				  mdp->rx_desc_dma);
+		mdp->rx_ring = NULL;
+	}
+
 	/* Free Rx skb ringbuffer */
 	if (mdp->rx_skbuff) {
 		for (i = 0; i < mdp->num_rx_ring; i++)
@@ -1073,27 +1131,18 @@ static void sh_eth_ring_free(struct net_device *ndev)
 	kfree(mdp->rx_skbuff);
 	mdp->rx_skbuff = NULL;
 
-	/* Free Tx skb ringbuffer */
-	if (mdp->tx_skbuff) {
-		for (i = 0; i < mdp->num_tx_ring; i++)
-			dev_kfree_skb(mdp->tx_skbuff[i]);
-	}
-	kfree(mdp->tx_skbuff);
-	mdp->tx_skbuff = NULL;
-
-	if (mdp->rx_ring) {
-		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
-				  mdp->rx_desc_dma);
-		mdp->rx_ring = NULL;
-	}
-
 	if (mdp->tx_ring) {
+		sh_eth_tx_free(ndev, false);
+
 		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
 		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
 				  mdp->tx_desc_dma);
 		mdp->tx_ring = NULL;
 	}
+
+	/* Free Tx skb ringbuffer */
+	kfree(mdp->tx_skbuff);
+	mdp->tx_skbuff = NULL;
 }
 
 /* format skb and descriptor buffer */
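
The new bool parameter is what makes the rename worthwhile: sh_eth_tx_free() now serves both the completion paths and teardown. A sketch of the two calling conventions as the diff uses them (the freed variable is illustrative):

/* Completion paths (Tx interrupt, error recovery, start_xmit
 * backpressure): reclaim only descriptors the hardware has released,
 * stopping at the first one whose TD_TACT bit is still set. */
int freed = sh_eth_tx_free(ndev, true);

/* Teardown path (sh_eth_ring_free()): the controller is already
 * quiesced, so walk the whole dirty range and unmap/free even unsent
 * buffers; only frames with TD_TACT clear are counted in the stats. */
sh_eth_tx_free(ndev, false);

Note the reordering inside sh_eth_ring_free(): the descriptors are still needed to look up the DMA addresses, so the unmap passes run before dma_free_coherent() releases the rings, and kfree(mdp->tx_skbuff) moves after sh_eth_tx_free() so the array is not freed while it is still being walked. The skbs themselves are released with dev_kfree_skb_irq(), which is safe in the hard-IRQ contexts that also call this function.
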
@@ -1341,43 +1390,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
 	update_mac_address(ndev);
 }
 
-/* free Tx skb function */
-static int sh_eth_txfree(struct net_device *ndev)
-{
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-	struct sh_eth_txdesc *txdesc;
-	int free_num = 0;
-	int entry;
-
-	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
-		entry = mdp->dirty_tx % mdp->num_tx_ring;
-		txdesc = &mdp->tx_ring[entry];
-		if (txdesc->status & cpu_to_le32(TD_TACT))
-			break;
-		/* TACT bit must be checked before all the following reads */
-		dma_rmb();
-		netif_info(mdp, tx_done, ndev,
-			   "tx entry %d status 0x%08x\n",
-			   entry, le32_to_cpu(txdesc->status));
-		/* Free the original skb. */
-		if (mdp->tx_skbuff[entry]) {
-			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
-					 le32_to_cpu(txdesc->len) >> 16,
-					 DMA_TO_DEVICE);
-			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
-			mdp->tx_skbuff[entry] = NULL;
-			free_num++;
-		}
-		txdesc->status = cpu_to_le32(TD_TFP);
-		if (entry >= mdp->num_tx_ring - 1)
-			txdesc->status |= cpu_to_le32(TD_TDLE);
-
-		ndev->stats.tx_packets++;
-		ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
-	}
-	return free_num;
-}
-
 /* Packet receive function */
 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 {
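
For comparison, the behavioral deltas between the removed sh_eth_txfree() and its replacement are small and deliberate; roughly:

/* Old loop: stop unconditionally at a descriptor still owned by the
 * hardware, and count every reclaimed frame as transmitted. */
if (txdesc->status & cpu_to_le32(TD_TACT))
	break;
/* ... */
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;

/* New loop: remember whether the frame was actually sent; teardown
 * (sent_only == false) keeps going past unsent descriptors, and the
 * statistics are bumped only for frames the MAC really put on the wire. */
sent = !(txdesc->status & cpu_to_le32(TD_TACT));
if (sent_only && !sent)
	break;
/* ... */
if (sent) {
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
}
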
@@ -1620,7 +1632,7 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
 			  intr_status, mdp->cur_tx, mdp->dirty_tx,
 			  (u32)ndev->state, edtrr);
 		/* dirty buffer free */
-		sh_eth_txfree(ndev);
+		sh_eth_tx_free(ndev, true);
 
 		/* SH7712 BUG */
 		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
@@ -1679,7 +1691,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 		/* Clear Tx interrupts */
 		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
 
-		sh_eth_txfree(ndev);
+		sh_eth_tx_free(ndev, true);
 		netif_wake_queue(ndev);
 	}
 
@@ -2307,7 +2319,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	spin_lock_irqsave(&mdp->lock, flags);
 	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
-		if (!sh_eth_txfree(ndev)) {
+		if (!sh_eth_tx_free(ndev, true)) {
 			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
 			netif_stop_queue(ndev);
 			spin_unlock_irqrestore(&mdp->lock, flags);
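
Finally, the backpressure check above relies on cur_tx and dirty_tx being free-running counters: the unsigned difference cur_tx - dirty_tx gives the number of descriptors in flight even across 32-bit wraparound, and the counters are reduced modulo num_tx_ring only when indexing the ring. A standalone illustration of that invariant (plain userspace C, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t num_tx_ring = 64;
	/* Free-running producer/consumer counters straddling the wrap. */
	uint32_t dirty_tx = UINT32_MAX - 1;	/* consumer, not yet wrapped */
	uint32_t cur_tx = dirty_tx + 5;		/* producer, wrapped past 0 */

	/* Modular subtraction still yields the in-flight count: 5. */
	printf("in flight: %u\n", (unsigned)(cur_tx - dirty_tx));

	/* Ring slots are always taken modulo the ring size, as in
	 * entry = mdp->dirty_tx % mdp->num_tx_ring. */
	printf("first entry to reclaim: %u\n",
	       (unsigned)(dirty_tx % num_tx_ring));
	return 0;
}
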
