author	Eric Dumazet <edumazet@google.com>	2023-04-21 09:43:54 +0000
committer	David S. Miller <davem@davemloft.net>	2023-04-23 13:35:07 +0100
commit	931e93bdf8ca71cef1f8759c43bc2c5385392b8b (patch)
tree	a75f366dac74b28ef860605937bbc15caabfe971 /net/core
parent	e8e1ce8454c9cc8ad2e4422bef346428e52455e3 (diff)
download	linux-931e93bdf8ca71cef1f8759c43bc2c5385392b8b.tar.gz
net: do not provide hard irq safety for sd->defer_lock
kfree_skb() can be called from hard irq handlers, but skb_attempt_defer_free()
is meant to be used from process or BH contexts, and skb_defer_free_flush()
is meant to be called from BH contexts.

Not having to mask hard irq can save some cycles.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
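For illustration only (not part of the patch): the rule the change relies on is that a lock never taken from hard irq context does not need the _irq/_irqsave spin_lock variants. A producer running in process or BH context takes the lock with spin_lock_bh(), which disables local softirqs so the BH-context consumer cannot deadlock against it on the same CPU; the consumer, already running in softirq (BH) context such as NET_RX_SOFTIRQ, can use plain spin_lock(). The sketch below uses hypothetical names (example_defer_lock, example_defer_produce, example_defer_consume); it is not kernel API and not code from this commit.

/* Minimal sketch of the locking pattern, assuming the lock is never
 * taken from hard irq context (hypothetical example, not kernel API).
 */
#include <linux/spinlock.h>
#include <linux/skbuff.h>

static DEFINE_SPINLOCK(example_defer_lock);
static struct sk_buff *example_defer_list;

/* Producer: runs in process or BH context. Disabling BH is enough;
 * hard irqs never touch example_defer_lock, so they need not be masked.
 */
static void example_defer_produce(struct sk_buff *skb)
{
	spin_lock_bh(&example_defer_lock);
	skb->next = example_defer_list;
	example_defer_list = skb;
	spin_unlock_bh(&example_defer_lock);
}

/* Consumer: already runs in BH (softirq) context, so a plain
 * spin_lock() is sufficient; no _irq or _bh variant is required.
 */
static void example_defer_consume(void)
{
	struct sk_buff *skb, *next;

	spin_lock(&example_defer_lock);
	skb = example_defer_list;
	example_defer_list = NULL;
	spin_unlock(&example_defer_lock);

	while (skb) {
		next = skb->next;
		__kfree_skb(skb);
		skb = next;
	}
}

Dropping the irq masking saves the local_irq_save()/local_irq_restore() work on every deferred free, which is the cycle saving the commit message refers to.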
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	4
-rw-r--r--	net/core/skbuff.c	5
2 files changed, 4 insertions, 5 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 1551aabac343..d15568f5a44f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6632,11 +6632,11 @@ static void skb_defer_free_flush(struct softnet_data *sd)
 	if (!READ_ONCE(sd->defer_list))
 		return;
 
-	spin_lock_irq(&sd->defer_lock);
+	spin_lock(&sd->defer_lock);
 	skb = sd->defer_list;
 	sd->defer_list = NULL;
 	sd->defer_count = 0;
-	spin_unlock_irq(&sd->defer_lock);
+	spin_unlock(&sd->defer_lock);
 
 	while (skb != NULL) {
 		next = skb->next;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bd815a00d2af..304a966164d8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6870,7 +6870,6 @@ void skb_attempt_defer_free(struct sk_buff *skb)
 {
 	int cpu = skb->alloc_cpu;
 	struct softnet_data *sd;
-	unsigned long flags;
 	unsigned int defer_max;
 	bool kick;
 
@@ -6889,7 +6888,7 @@ nodefer: __kfree_skb(skb);
 	if (READ_ONCE(sd->defer_count) >= defer_max)
 		goto nodefer;
 
-	spin_lock_irqsave(&sd->defer_lock, flags);
+	spin_lock_bh(&sd->defer_lock);
 	/* Send an IPI every time queue reaches half capacity. */
 	kick = sd->defer_count == (defer_max >> 1);
 	/* Paired with the READ_ONCE() few lines above */
@@ -6898,7 +6897,7 @@ nodefer: __kfree_skb(skb);
 	skb->next = sd->defer_list;
 	/* Paired with READ_ONCE() in skb_defer_free_flush() */
 	WRITE_ONCE(sd->defer_list, skb);
-	spin_unlock_irqrestore(&sd->defer_lock, flags);
+	spin_unlock_bh(&sd->defer_lock);
 
 	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
 	 * if we are unlucky enough (this seems very unlikely).