Diffstat (limited to 'drivers/net/ethernet/broadcom/bcm63xx_enet.c')
-rw-r--r-- | drivers/net/ethernet/broadcom/bcm63xx_enet.c | 186
1 file changed, 93 insertions, 93 deletions
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 916824cca3fd..fd8767213165 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -220,7 +220,7 @@ static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
 /*
  * refill rx queue
  */
-static int bcm_enet_refill_rx(struct net_device *dev)
+static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
 {
 	struct bcm_enet_priv *priv;
 
@@ -228,26 +228,29 @@ static int bcm_enet_refill_rx(struct net_device *dev)
 	while (priv->rx_desc_count < priv->rx_ring_size) {
 		struct bcm_enet_desc *desc;
-		struct sk_buff *skb;
-		dma_addr_t p;
 		int desc_idx;
 		u32 len_stat;
 
 		desc_idx = priv->rx_dirty_desc;
 		desc = &priv->rx_desc_cpu[desc_idx];
 
-		if (!priv->rx_skb[desc_idx]) {
-			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
-			if (!skb)
+		if (!priv->rx_buf[desc_idx]) {
+			void *buf;
+
+			if (likely(napi_mode))
+				buf = napi_alloc_frag(priv->rx_frag_size);
+			else
+				buf = netdev_alloc_frag(priv->rx_frag_size);
+			if (unlikely(!buf))
 				break;
-			priv->rx_skb[desc_idx] = skb;
-			p = dma_map_single(&priv->pdev->dev, skb->data,
-					   priv->rx_skb_size,
-					   DMA_FROM_DEVICE);
-			desc->address = p;
+			priv->rx_buf[desc_idx] = buf;
+			desc->address = dma_map_single(&priv->pdev->dev,
+						       buf + priv->rx_buf_offset,
+						       priv->rx_buf_size,
+						       DMA_FROM_DEVICE);
 		}
 
-		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
+		len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
 		len_stat |= DMADESC_OWNER_MASK;
 		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
 			len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
@@ -287,7 +290,7 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
 	struct net_device *dev = priv->net_dev;
 
 	spin_lock(&priv->rx_lock);
-	bcm_enet_refill_rx(dev);
+	bcm_enet_refill_rx(dev, false);
 	spin_unlock(&priv->rx_lock);
 }
 
@@ -297,10 +300,12 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
 static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 {
 	struct bcm_enet_priv *priv;
+	struct list_head rx_list;
 	struct device *kdev;
 	int processed;
 
 	priv = netdev_priv(dev);
+	INIT_LIST_HEAD(&rx_list);
 	kdev = &priv->pdev->dev;
 	processed = 0;
 
@@ -315,6 +320,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 		int desc_idx;
 		u32 len_stat;
 		unsigned int len;
+		void *buf;
 
 		desc_idx = priv->rx_curr_desc;
 		desc = &priv->rx_desc_cpu[desc_idx];
@@ -333,7 +339,6 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 		priv->rx_curr_desc++;
 		if (priv->rx_curr_desc == priv->rx_ring_size)
 			priv->rx_curr_desc = 0;
-		priv->rx_desc_count--;
 
 		/* if the packet does not have start of packet _and_
 		 * end of packet flag set, then just recycle it */
@@ -360,16 +365,14 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 		}
 
 		/* valid packet */
-		skb = priv->rx_skb[desc_idx];
+		buf = priv->rx_buf[desc_idx];
 		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
 		/* don't include FCS */
 		len -= 4;
 
 		if (len < copybreak) {
-			struct sk_buff *nskb;
-
-			nskb = napi_alloc_skb(&priv->napi, len);
-			if (!nskb) {
+			skb = napi_alloc_skb(&priv->napi, len);
+			if (unlikely(!skb)) {
 				/* forget packet, just rearm desc */
 				dev->stats.rx_dropped++;
 				continue;
 			}
@@ -377,26 +380,36 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 			dma_sync_single_for_cpu(kdev, desc->address,
 						len, DMA_FROM_DEVICE);
-			memcpy(nskb->data, skb->data, len);
+			memcpy(skb->data, buf + priv->rx_buf_offset, len);
 			dma_sync_single_for_device(kdev, desc->address,
 						   len, DMA_FROM_DEVICE);
-			skb = nskb;
 		} else {
-			dma_unmap_single(&priv->pdev->dev, desc->address,
-					 priv->rx_skb_size, DMA_FROM_DEVICE);
-			priv->rx_skb[desc_idx] = NULL;
+			dma_unmap_single(kdev, desc->address,
+					 priv->rx_buf_size, DMA_FROM_DEVICE);
+			priv->rx_buf[desc_idx] = NULL;
+
+			skb = build_skb(buf, priv->rx_frag_size);
+			if (unlikely(!skb)) {
+				skb_free_frag(buf);
+				dev->stats.rx_dropped++;
+				continue;
+			}
+			skb_reserve(skb, priv->rx_buf_offset);
 		}
 
 		skb_put(skb, len);
 		skb->protocol = eth_type_trans(skb, dev);
 		dev->stats.rx_packets++;
 		dev->stats.rx_bytes += len;
-		netif_receive_skb(skb);
+		list_add_tail(&skb->list, &rx_list);
 
-	} while (--budget > 0);
+	} while (processed < budget);
+
+	netif_receive_skb_list(&rx_list);
+	priv->rx_desc_count -= processed;
 
 	if (processed || !priv->rx_desc_count) {
-		bcm_enet_refill_rx(dev);
+		bcm_enet_refill_rx(dev, true);
 
 		/* kick rx dma */
 		enet_dmac_writel(priv, priv->dma_chan_en_mask,
@@ -413,9 +426,11 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
 {
 	struct bcm_enet_priv *priv;
+	unsigned int bytes;
 	int released;
 
 	priv = netdev_priv(dev);
+	bytes = 0;
 	released = 0;
 
 	while (priv->tx_desc_count < priv->tx_ring_size) {
@@ -452,10 +467,13 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
 		if (desc->len_stat & DMADESC_UNDER_MASK)
 			dev->stats.tx_errors++;
 
+		bytes += skb->len;
 		dev_kfree_skb(skb);
 		released++;
 	}
 
+	netdev_completed_queue(dev, released, bytes);
+
 	if (netif_queue_stopped(dev) && released)
 		netif_wake_queue(dev);
 
@@ -622,8 +640,11 @@ bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	desc->len_stat = len_stat;
 	wmb();
 
+	netdev_sent_queue(dev, skb->len);
+
 	/* kick tx dma */
-	enet_dmac_writel(priv, priv->dma_chan_en_mask,
+	if (!netdev_xmit_more() || !priv->tx_desc_count)
+		enet_dmac_writel(priv, priv->dma_chan_en_mask,
 			 ENETDMAC_CHANCFG, priv->tx_chan);
 
 	/* stop queue if no more desc available */
@@ -845,6 +866,24 @@ static void bcm_enet_adjust_link(struct net_device *dev)
 			priv->pause_tx ? "tx" : "off");
 }
 
+static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->rx_ring_size; i++) {
+		struct bcm_enet_desc *desc;
+
+		if (!priv->rx_buf[i])
+			continue;
+
+		desc = &priv->rx_desc_cpu[i];
+		dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
+				 DMA_FROM_DEVICE);
+		skb_free_frag(priv->rx_buf[i]);
+	}
+	kfree(priv->rx_buf);
+}
+
 /*
  * open callback, allocate dma rings & buffers and start rx operation
  */
@@ -954,10 +993,10 @@ static int bcm_enet_open(struct net_device *dev)
 	priv->tx_curr_desc = 0;
 	spin_lock_init(&priv->tx_lock);
 
-	/* init & fill rx ring with skbs */
-	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+	/* init & fill rx ring with buffers */
+	priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
 			       GFP_KERNEL);
-	if (!priv->rx_skb) {
+	if (!priv->rx_buf) {
 		ret = -ENOMEM;
 		goto out_free_tx_skb;
 	}
@@ -974,8 +1013,8 @@ static int bcm_enet_open(struct net_device *dev)
 	enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
 			 ENETDMAC_BUFALLOC, priv->rx_chan);
 
-	if (bcm_enet_refill_rx(dev)) {
-		dev_err(kdev, "cannot allocate rx skb queue\n");
+	if (bcm_enet_refill_rx(dev, false)) {
+		dev_err(kdev, "cannot allocate rx buffer queue\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1069,18 +1108,7 @@ static int bcm_enet_open(struct net_device *dev)
 	return 0;
 
 out:
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct bcm_enet_desc *desc;
-
-		if (!priv->rx_skb[i])
-			continue;
-
-		desc = &priv->rx_desc_cpu[i];
-		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
-				 DMA_FROM_DEVICE);
-		kfree_skb(priv->rx_skb[i]);
-	}
-	kfree(priv->rx_skb);
+	bcm_enet_free_rx_buf_ring(kdev, priv);
 
 out_free_tx_skb:
 	kfree(priv->tx_skb);
@@ -1159,12 +1187,12 @@ static int bcm_enet_stop(struct net_device *dev)
 {
 	struct bcm_enet_priv *priv;
 	struct device *kdev;
-	int i;
 
 	priv = netdev_priv(dev);
 	kdev = &priv->pdev->dev;
 
 	netif_stop_queue(dev);
+	netdev_reset_queue(dev);
 	napi_disable(&priv->napi);
 	if (priv->has_phy)
 		phy_stop(dev->phydev);
@@ -1186,21 +1214,10 @@ static int bcm_enet_stop(struct net_device *dev)
 	/* force reclaim of all tx buffers */
 	bcm_enet_tx_reclaim(dev, 1);
 
-	/* free the rx skb ring */
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct bcm_enet_desc *desc;
-
-		if (!priv->rx_skb[i])
-			continue;
-
-		desc = &priv->rx_desc_cpu[i];
-		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
-				 DMA_FROM_DEVICE);
-		kfree_skb(priv->rx_skb[i]);
-	}
+	/* free the rx buffer ring */
+	bcm_enet_free_rx_buf_ring(kdev, priv);
 
 	/* free remaining allocated memory */
-	kfree(priv->rx_skb);
 	kfree(priv->tx_skb);
 	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
 			  priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -1622,9 +1639,12 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
 	 * align rx buffer size to dma burst len, account FCS since
 	 * it's appended
 	 */
-	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
+	priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
 				  priv->dma_maxburst * 4);
 
+	priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
+			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
 	dev->mtu = new_mtu;
 	return 0;
 }
@@ -1709,6 +1729,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	priv->enet_is_sw = false;
 	priv->dma_maxburst = BCMENET_DMA_MAXBURST;
+	priv->rx_buf_offset = NET_SKB_PAD;
 
 	ret = bcm_enet_change_mtu(dev, dev->mtu);
 	if (ret)
@@ -2126,7 +2147,7 @@ static int bcm_enetsw_open(struct net_device *dev)
 	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
 			       GFP_KERNEL);
 	if (!priv->tx_skb) {
-		dev_err(kdev, "cannot allocate rx skb queue\n");
+		dev_err(kdev, "cannot allocate tx skb queue\n");
 		ret = -ENOMEM;
 		goto out_free_tx_ring;
 	}
@@ -2136,11 +2157,11 @@ static int bcm_enetsw_open(struct net_device *dev)
 	priv->tx_curr_desc = 0;
 	spin_lock_init(&priv->tx_lock);
 
-	/* init & fill rx ring with skbs */
-	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+	/* init & fill rx ring with buffers */
+	priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
 			       GFP_KERNEL);
-	if (!priv->rx_skb) {
-		dev_err(kdev, "cannot allocate rx skb queue\n");
+	if (!priv->rx_buf) {
+		dev_err(kdev, "cannot allocate rx buffer queue\n");
 		ret = -ENOMEM;
 		goto out_free_tx_skb;
 	}
@@ -2187,8 +2208,8 @@ static int bcm_enetsw_open(struct net_device *dev)
 	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
 			ENETDMA_BUFALLOC_REG(priv->rx_chan));
 
-	if (bcm_enet_refill_rx(dev)) {
-		dev_err(kdev, "cannot allocate rx skb queue\n");
+	if (bcm_enet_refill_rx(dev, false)) {
+		dev_err(kdev, "cannot allocate rx buffer queue\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -2287,18 +2308,7 @@ static int bcm_enetsw_open(struct net_device *dev)
 	return 0;
 
 out:
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct bcm_enet_desc *desc;
-
-		if (!priv->rx_skb[i])
-			continue;
-
-		desc = &priv->rx_desc_cpu[i];
-		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
-				 DMA_FROM_DEVICE);
-		kfree_skb(priv->rx_skb[i]);
-	}
-	kfree(priv->rx_skb);
+	bcm_enet_free_rx_buf_ring(kdev, priv);
 
 out_free_tx_skb:
 	kfree(priv->tx_skb);
@@ -2327,13 +2337,13 @@ static int bcm_enetsw_stop(struct net_device *dev)
 {
 	struct bcm_enet_priv *priv;
 	struct device *kdev;
-	int i;
 
 	priv = netdev_priv(dev);
 	kdev = &priv->pdev->dev;
 
 	del_timer_sync(&priv->swphy_poll);
 	netif_stop_queue(dev);
+	netdev_reset_queue(dev);
 	napi_disable(&priv->napi);
 	del_timer_sync(&priv->rx_timeout);
@@ -2348,21 +2358,10 @@ static int bcm_enetsw_stop(struct net_device *dev)
 	/* force reclaim of all tx buffers */
 	bcm_enet_tx_reclaim(dev, 1);
 
-	/* free the rx skb ring */
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct bcm_enet_desc *desc;
-
-		if (!priv->rx_skb[i])
-			continue;
-
-		desc = &priv->rx_desc_cpu[i];
-		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
-				 DMA_FROM_DEVICE);
-		kfree_skb(priv->rx_skb[i]);
-	}
+	/* free the rx buffer ring */
+	bcm_enet_free_rx_buf_ring(kdev, priv);
 
 	/* free remaining allocated memory */
-	kfree(priv->rx_skb);
 	kfree(priv->tx_skb);
 	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
 			  priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -2659,6 +2658,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
 	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
 	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
 	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
+	priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;
 
 	pd = dev_get_platdata(&pdev->dev);
 	if (pd) {
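Note on the receive-path pattern adopted above (not part of the patch): the sketch below condenses the page-frag plus build_skb() scheme into two standalone helpers, sized and offset the same way the diff sizes rx_frag_size and rx_buf_offset. The helper names (example_rx_frag_size, example_rx_frag_to_skb) and their parameters are illustrative assumptions, not code from this driver.

/* Illustrative sketch only -- mirrors the pattern used in the diff above;
 * helper names and parameters are assumptions, not driver code.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Size of one page frag backing an rx buffer: headroom + packet area,
 * plus room for the struct skb_shared_info that build_skb() places at
 * the end of the frag (same formula the patch uses for rx_frag_size).
 */
static unsigned int example_rx_frag_size(unsigned int buf_offset,
					 unsigned int buf_size)
{
	return SKB_DATA_ALIGN(buf_offset + buf_size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

/* Wrap a filled rx frag into an skb without copying the payload:
 * build_skb() takes ownership of the frag, skb_reserve() skips the
 * headroom the hardware did not write, skb_put() exposes len bytes.
 */
static struct sk_buff *example_rx_frag_to_skb(struct net_device *dev,
					      void *buf,
					      unsigned int buf_offset,
					      unsigned int frag_size,
					      unsigned int len)
{
	struct sk_buff *skb;

	skb = build_skb(buf, frag_size);
	if (unlikely(!skb)) {
		skb_free_frag(buf);	/* drop the frag, nothing owns it now */
		return NULL;
	}

	skb_reserve(skb, buf_offset);
	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, dev);
	return skb;
}

The napi_mode flag passed to bcm_enet_refill_rx() in the patch exists because napi_alloc_frag() may only be used from NAPI (softirq) context, whereas netdev_alloc_frag() is also safe from the refill timer and the open paths.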