#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>

/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to :	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}

/**
 * skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: buffer to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
{
	int poff = proto_ports_offset(ip_proto);

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = skb_header_pointer(skb, thoff + poff,
					   sizeof(_ports), &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(skb_flow_get_ports);

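/* skb_flow_dissect - extract a minimal set of flow keys from a packet
 *
 * Walks the headers starting at skb_network_offset(), following VLAN,
 * PPPoE, GRE and IP-in-IP encapsulation, and fills @flow with the
 * source/destination addresses, transport ports, IP protocol and
 * transport header offset.  Returns false when the packet cannot be
 * dissected (truncated headers, unknown protocol, ...).
 */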
bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
	int nhoff = skb_network_offset(skb);
	u8 ip_proto;
	__be16 proto = skb->protocol;

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph || iph->ihl < 5)
			return false;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;
		if (ip_is_fragment(iph))
			ip_proto = 0;

		iph_to_flow_copy_addrs(flow, iph);
		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
		__be32 flow_label;

ipv6:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
		nhoff += sizeof(struct ipv6hdr);

		flow_label = ip6_flowlabel(iph);
		if (flow_label) {
			/* Awesome, IPv6 packet has a flow label so we can
			 * use that to represent the ports without any
			 * further dissection.
			 */
			flow->n_proto = proto;
			flow->ip_proto = ip_proto;
			flow->ports = flow_label;
			flow->thoff = (u16)nhoff;

			return true;
		}

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	default:
		return false;
	}

	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
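			/* Each optional field flagged in the GRE header
			 * (checksum, key, sequence number) occupies another
			 * 4 bytes, hence the steps above.  ETH_P_TEB means
			 * the GRE payload is a complete Ethernet frame
			 * (transparent Ethernet bridging, e.g. gretap), so
			 * parse the inner Ethernet header to find the real
			 * network protocol.
			 */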
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = skb_header_pointer(skb, nhoff,
							 sizeof(_eth), &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		goto ipv6;
	default:
		break;
	}

	flow->n_proto = proto;
	flow->ip_proto = ip_proto;
	flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
	flow->thoff = (u16) nhoff;

	return true;
}
EXPORT_SYMBOL(skb_flow_dissect);

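/* hashrnd is a random seed drawn once at first use (net_get_random_once)
 * and mixed into every flow hash so that hash values are not predictable
 * from the packet contents alone.
 */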
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
{
	__flow_hash_secret_init();
	return jhash_3words(a, b, c, hashrnd);
}

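/* Hash the flow keys after putting addresses and ports into a canonical
 * order, so that both directions of the same flow yield the same hash.
 * Zero is reserved to mean "no valid hash", hence a zero result is
 * remapped to 1.
 */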
static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
{
	u32 hash;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys->dst < (__force u32)keys->src) ||
	    (((__force u32)keys->dst == (__force u32)keys->src) &&
	     ((__force u16)keys->port16[1] < (__force u16)keys->port16[0]))) {
		swap(keys->dst, keys->src);
		swap(keys->port16[0], keys->port16[1]);
	}

	hash = __flow_hash_3words((__force u32)keys->dst,
				  (__force u32)keys->src,
				  (__force u32)keys->ports);
	if (!hash)
		hash = 1;

	return hash;
}

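/* Exported wrapper around __flow_hash_from_keys() for callers that have
 * already dissected a packet into a struct flow_keys.  Note that the keys
 * may be reordered in place by the canonicalisation above.
 */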
u32 flow_hash_from_keys(struct flow_keys *keys)
{
	return __flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(flow_hash_from_keys);

/*
 * __skb_get_hash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets hash in skb to a non-zero hash value
 * on success; zero indicates no valid hash.  Also sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_hash = 1;

	skb->sw_hash = 1;

	skb->hash = __flow_hash_from_keys(&keys);
}
EXPORT_SYMBOL(__skb_get_hash);

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

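	/* Scale the 32-bit flow hash onto [0, qcount) by taking the upper
	 * 32 bits of hash * qcount; this spreads flows evenly over the
	 * queue range without a modulo operation.
	 */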
	return (u16) (((u64)skb_get_hash(skb) * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);

/* __skb_get_poff() returns the offset to the payload as far as it could
 * be dissected.  The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push actual payload to user space
 * and can instead analyze headers only.
 */
u32 __skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 poff = 0;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	poff += keys.thoff;
	switch (keys.ip_proto) {
	case IPPROTO_TCP: {
		const struct tcphdr *tcph;
		struct tcphdr _tcph;

		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
		if (!tcph)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}

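/* XPS (Transmit Packet Steering): when a CPU-to-queue mapping has been
 * configured for the device, pick one of the tx queues assigned to the
 * CPU we are currently running on, using the flow hash to spread flows
 * across multiple queues.  Returns -1 if XPS is disabled, unconfigured,
 * or the mapped queue is out of range.
 */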
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				queue_index = map->queues[
				    ((u64)skb_get_hash(skb) * map->len) >> 32];

			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

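/* Default tx queue selection, used directly when the driver has no
 * ndo_select_queue and passed to drivers as the fallback otherwise:
 * reuse the queue index cached on the socket when it is still valid,
 * otherwise try XPS and fall back to skb_tx_hash().  The new index is
 * cached on the socket (if it has a cached destination) so that
 * subsequent packets of the flow stay on the same queue.
 */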
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}

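/* netdev_pick_tx - select and record the tx queue for @skb.  Drivers
 * providing ndo_select_queue make the choice themselves (receiving
 * __netdev_pick_tx as their fallback); otherwise the default selection
 * is used.  Unless an accelerated path (accel_priv) is in use, the
 * resulting index is capped to the device's real queue count before it
 * is stored in the skb's queue mapping.
 */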
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}