netdev: Add netdev->select_queue() method.
Devices or device layers can set this to control the queue selection
performed by dev_pick_tx().
The callback is invoked under RCU protection, which gives overriding
functions a way to synchronize with dynamic adjustments such as changes
to ->real_num_tx_queues (see the sketch following the diff).
This makes the spinlock prefetch in dev_queue_xmit() a little less
effective, since the prefetch now issues later, closer to where the lock
is actually taken, but that's the price right now for correctness.
Signed-off-by: David S. Miller <davem@davemloft.net>
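
For illustration only (not part of this patch): a minimal sketch of the kind
of callback a multiqueue driver could install once this hook exists. The
driver name, the hash choice, and foo_setup() are hypothetical; the sketch
relies only on the ->select_queue signature added below and on the fact that
dev_pick_tx() calls it.

#include <linux/jhash.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver callback: spread transmits across the device's
 * TX queues by hashing the packet's protocol.  This is a crude example;
 * a real driver would hash flow fields instead.  It is called from
 * dev_pick_tx(), which after this patch runs inside the
 * rcu_read_lock_bh() section of dev_queue_xmit().
 */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	u32 hash = jhash_1word(ntohs(skb->protocol), 0);

	return (u16)(hash % dev->real_num_tx_queues);
}

static void foo_setup(struct net_device *dev)
{
	/* struct net_device member added by this patch */
	dev->select_queue = foo_select_queue;
}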
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fdac115..9464e64 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -724,6 +724,9 @@
 	void			(*poll_controller)(struct net_device *dev);
 #endif
 
+	u16			(*select_queue)(struct net_device *dev,
+						struct sk_buff *skb);
+
 #ifdef CONFIG_NET_NS
 	/* Network namespace this network device is inside */
 	struct net		*nd_net;
diff --git a/net/core/dev.c b/net/core/dev.c
index f027a1a..7ca9564 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1670,6 +1670,9 @@
 {
 	u16 queue_index = 0;
 
+	if (dev->select_queue)
+		queue_index = dev->select_queue(dev, skb);
+
 	skb_set_queue_mapping(skb, queue_index);
 	return netdev_get_tx_queue(dev, queue_index);
 }
@@ -1710,14 +1713,14 @@
 	}
 
 gso:
-	txq = dev_pick_tx(dev, skb);
-	spin_lock_prefetch(&txq->lock);
-
 	/* Disable soft irqs for various locks below. Also
 	 * stops preemption for RCU.
 	 */
 	rcu_read_lock_bh();
 
+	txq = dev_pick_tx(dev, skb);
+	spin_lock_prefetch(&txq->lock);
+
 	/* Updates of qdisc are serialized by queue->lock.
 	 * The struct Qdisc which is pointed to by qdisc is now a
 	 * rcu structure - it may be accessed without acquiring
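
As mentioned in the commit message, the RCU protection is what lets a layer
that changes ->real_num_tx_queues synchronize with in-flight queue selection.
A hedged sketch of that pattern follows; the helper is hypothetical and not
part of this patch, and it assumes synchronize_net() is sufficient to wait
out the rcu_read_lock_bh() section taken in dev_queue_xmit().

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical helper: shrink the number of usable TX queues.  Because
 * dev_pick_tx() now runs inside the rcu_read_lock_bh() section of
 * dev_queue_xmit(), waiting for a grace period after the store means no
 * transmitter can still be acting on the old queue count.  Packets
 * already queued on the retired queues would still need draining; that
 * is omitted here.
 */
static void foo_shrink_tx_queues(struct net_device *dev, unsigned int txq)
{
	ASSERT_RTNL();
	BUG_ON(txq < 1 || txq > dev->num_tx_queues);

	dev->real_num_tx_queues = txq;

	/* wait for transmitters that sampled the old value to finish */
	synchronize_net();
}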