| /* |
| * NET3 Protocol independent device support routines. |
| * |
| * This program is free software; you can redistribute it and/or |
| * modify it under the terms of the GNU General Public License |
| * as published by the Free Software Foundation; either version |
| * 2 of the License, or (at your option) any later version. |
| * |
| * Derived from the non IP parts of dev.c 1.0.19 |
| * Authors: Ross Biro |
| * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
| * Mark Evans, <evansmp@uhura.aston.ac.uk> |
| * |
| * Additional Authors: |
| * Florian la Roche <rzsfl@rz.uni-sb.de> |
| * Alan Cox <gw4pts@gw4pts.ampr.org> |
| * David Hinds <dahinds@users.sourceforge.net> |
| * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> |
| * Adam Sulmicki <adam@cfar.umd.edu> |
| * Pekka Riikonen <priikone@poesidon.pspt.fi> |
| * |
| * Changes: |
| * D.J. Barrow : Fixed bug where dev->refcnt gets set |
| * to 2 if register_netdev gets called |
| * before net_dev_init & also removed a |
| * few lines of code in the process. |
| * Alan Cox : device private ioctl copies fields back. |
| * Alan Cox : Transmit queue code does relevant |
| * stunts to keep the queue safe. |
| * Alan Cox : Fixed double lock. |
| * Alan Cox : Fixed promisc NULL pointer trap |
| * ???????? : Support the full private ioctl range |
| * Alan Cox : Moved ioctl permission check into |
| * drivers |
| * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI |
| * Alan Cox : 100 backlog just doesn't cut it when |
| * you start doing multicast video 8) |
| * Alan Cox : Rewrote net_bh and list manager. |
| * Alan Cox : Fix ETH_P_ALL echoback lengths. |
| * Alan Cox : Took out transmit every packet pass |
| * Saved a few bytes in the ioctl handler |
| * Alan Cox : Network driver sets packet type before |
| * calling netif_rx. Saves a function |
| * call a packet. |
| * Alan Cox : Hashed net_bh() |
| * Richard Kooijman: Timestamp fixes. |
| * Alan Cox : Wrong field in SIOCGIFDSTADDR |
| * Alan Cox : Device lock protection. |
| * Alan Cox : Fixed nasty side effect of device close |
| * changes. |
| * Rudi Cilibrasi : Pass the right thing to |
| * set_mac_address() |
| * Dave Miller : 32bit quantity for the device lock to |
| * make it work out on a Sparc. |
| * Bjorn Ekwall : Added KERNELD hack. |
| * Alan Cox : Cleaned up the backlog initialise. |
| * Craig Metz : SIOCGIFCONF fix if space for under |
| * 1 device. |
| * Thomas Bogendoerfer : Return ENODEV for dev_open, if there |
| * is no device open function. |
| * Andi Kleen : Fix error reporting for SIOCGIFCONF |
| * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF |
| * Cyrus Durgin : Cleaned for KMOD |
| * Adam Sulmicki : Bug Fix : Network Device Unload |
| * A network device unload needs to purge |
| * the backlog queue. |
| * Paul Rusty Russell : SIOCSIFNAME |
| * Pekka Riikonen : Netdev boot-time settings code |
| * Andrew Morton : Make unregister_netdevice wait |
| * indefinitely on dev->refcnt |
| * J Hadi Salim : - Backlog queue sampling |
| * - netif_rx() feedback |
| */ |
| |
| #include <asm/uaccess.h> |
| #include <linux/bitops.h> |
| #include <linux/capability.h> |
| #include <linux/cpu.h> |
| #include <linux/types.h> |
| #include <linux/kernel.h> |
| #include <linux/hash.h> |
| #include <linux/slab.h> |
| #include <linux/sched.h> |
| #include <linux/mutex.h> |
| #include <linux/string.h> |
| #include <linux/mm.h> |
| #include <linux/socket.h> |
| #include <linux/sockios.h> |
| #include <linux/errno.h> |
| #include <linux/interrupt.h> |
| #include <linux/if_ether.h> |
| #include <linux/netdevice.h> |
| #include <linux/etherdevice.h> |
| #include <linux/ethtool.h> |
| #include <linux/notifier.h> |
| #include <linux/skbuff.h> |
| #include <net/net_namespace.h> |
| #include <net/sock.h> |
| #include <linux/rtnetlink.h> |
| #include <linux/stat.h> |
| #include <net/dst.h> |
| #include <net/dst_metadata.h> |
| #include <net/pkt_sched.h> |
| #include <net/checksum.h> |
| #include <net/xfrm.h> |
| #include <linux/highmem.h> |
| #include <linux/init.h> |
| #include <linux/module.h> |
| #include <linux/netpoll.h> |
| #include <linux/rcupdate.h> |
| #include <linux/delay.h> |
| #include <net/iw_handler.h> |
| #include <asm/current.h> |
| #include <linux/audit.h> |
| #include <linux/dmaengine.h> |
| #include <linux/err.h> |
| #include <linux/ctype.h> |
| #include <linux/if_arp.h> |
| #include <linux/if_vlan.h> |
| #include <linux/ip.h> |
| #include <net/ip.h> |
| #include <net/mpls.h> |
| #include <linux/ipv6.h> |
| #include <linux/in.h> |
| #include <linux/jhash.h> |
| #include <linux/random.h> |
| #include <trace/events/napi.h> |
| #include <trace/events/net.h> |
| #include <trace/events/skb.h> |
| #include <linux/pci.h> |
| #include <linux/inetdevice.h> |
| #include <linux/cpu_rmap.h> |
| #include <linux/static_key.h> |
| #include <linux/hashtable.h> |
| #include <linux/vmalloc.h> |
| #include <linux/if_macvlan.h> |
| #include <linux/errqueue.h> |
| #include <linux/hrtimer.h> |
| #include <linux/netfilter_ingress.h> |
| |
| #include "net-sysfs.h" |
| |
| /* Instead of increasing this, you should create a hash table. */ |
| #define MAX_GRO_SKBS 8 |
| |
| /* This should be increased if a protocol with a bigger head is added. */ |
| #define GRO_MAX_HEAD (MAX_HEADER + 128) |
| |
| static DEFINE_SPINLOCK(ptype_lock); |
| static DEFINE_SPINLOCK(offload_lock); |
| struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; |
| struct list_head ptype_all __read_mostly; /* Taps */ |
| static struct list_head offload_base __read_mostly; |
| |
| static int netif_rx_internal(struct sk_buff *skb); |
| static int call_netdevice_notifiers_info(unsigned long val, |
| struct net_device *dev, |
| struct netdev_notifier_info *info); |
| |
| /* |
| * The @dev_base_head list is protected by @dev_base_lock and the rtnl |
| * semaphore. |
| * |
| * Pure readers hold dev_base_lock for reading, or rcu_read_lock() |
| * |
| * Writers must hold the rtnl semaphore while they loop through the |
| * dev_base_head list, and hold dev_base_lock for writing when they do the |
| * actual updates. This allows pure readers to access the list even |
| * while a writer is preparing to update it. |
| * |
| * To put it another way, dev_base_lock is held for writing only to |
| * protect against pure readers; the rtnl semaphore provides the |
| * protection against other writers. |
| * |
| * See, for example usages, register_netdevice() and |
| * unregister_netdevice(), which must be called with the rtnl |
| * semaphore held. |
| */ |
| DEFINE_RWLOCK(dev_base_lock); |
| EXPORT_SYMBOL(dev_base_lock); |
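| |
| /* |
| * A minimal sketch of the pure-reader pattern described above; the |
| * helper name example_count_devs() is hypothetical and not part of |
| * this file: |
| * |
| *	static int example_count_devs(struct net *net) |
| *	{ |
| *		struct net_device *dev; |
| *		int count = 0; |
| * |
| *		rcu_read_lock(); |
| *		for_each_netdev_rcu(net, dev) |
| *			count++; |
| *		rcu_read_unlock(); |
| *		return count; |
| *	} |
| */ |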
| |
| /* protects napi_hash addition/deletion and napi_gen_id */ |
| static DEFINE_SPINLOCK(napi_hash_lock); |
| |
| static unsigned int napi_gen_id; |
| static DEFINE_HASHTABLE(napi_hash, 8); |
| |
| static seqcount_t devnet_rename_seq; |
| |
| static inline void dev_base_seq_inc(struct net *net) |
| { |
| while (++net->dev_base_seq == 0) |
| ; |
| } |
| |
| static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) |
| { |
| unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); |
| |
| return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; |
| } |
| |
| static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) |
| { |
| return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; |
| } |
| |
| static inline void rps_lock(struct softnet_data *sd) |
| { |
| #ifdef CONFIG_RPS |
| spin_lock(&sd->input_pkt_queue.lock); |
| #endif |
| } |
| |
| static inline void rps_unlock(struct softnet_data *sd) |
| { |
| #ifdef CONFIG_RPS |
| spin_unlock(&sd->input_pkt_queue.lock); |
| #endif |
| } |
| |
| /* Device list insertion */ |
| static void list_netdevice(struct net_device *dev) |
| { |
| struct net *net = dev_net(dev); |
| |
| ASSERT_RTNL(); |
| |
| write_lock_bh(&dev_base_lock); |
| list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); |
| hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); |
| hlist_add_head_rcu(&dev->index_hlist, |
| dev_index_hash(net, dev->ifindex)); |
| write_unlock_bh(&dev_base_lock); |
| |
| dev_base_seq_inc(net); |
| } |
| |
| /* Device list removal |
| * caller must respect an RCU grace period before freeing/reusing dev |
| */ |
| static void unlist_netdevice(struct net_device *dev) |
| { |
| ASSERT_RTNL(); |
| |
| /* Unlink dev from the device chain */ |
| write_lock_bh(&dev_base_lock); |
| list_del_rcu(&dev->dev_list); |
| hlist_del_rcu(&dev->name_hlist); |
| hlist_del_rcu(&dev->index_hlist); |
| write_unlock_bh(&dev_base_lock); |
| |
| dev_base_seq_inc(dev_net(dev)); |
| } |
| |
| /* |
| * Our notifier list |
| */ |
| |
| static RAW_NOTIFIER_HEAD(netdev_chain); |
| |
| /* |
| * Device drivers call our routines to queue packets here. We empty the |
| * queue in the local softnet handler. |
| */ |
| |
| DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
| EXPORT_PER_CPU_SYMBOL(softnet_data); |
| |
| #ifdef CONFIG_LOCKDEP |
| /* |
| * register_netdevice() inits txq->_xmit_lock and sets lockdep class |
| * according to dev->type |
| */ |
| static const unsigned short netdev_lock_type[] = |
| {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, |
| ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, |
| ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, |
| ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, |
| ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, |
| ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, |
| ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, |
| ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, |
| ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, |
| ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, |
| ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, |
| ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, |
| ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, |
| ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, |
| ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; |
| |
| static const char *const netdev_lock_name[] = |
| {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", |
| "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", |
| "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", |
| "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", |
| "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", |
| "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", |
| "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", |
| "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", |
| "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", |
| "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", |
| "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", |
| "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", |
| "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", |
| "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", |
| "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; |
| |
| static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; |
| static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; |
| |
| static inline unsigned short netdev_lock_pos(unsigned short dev_type) |
| { |
| int i; |
| |
| for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) |
| if (netdev_lock_type[i] == dev_type) |
| return i; |
| /* the last key is used by default */ |
| return ARRAY_SIZE(netdev_lock_type) - 1; |
| } |
| |
| static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, |
| unsigned short dev_type) |
| { |
| int i; |
| |
| i = netdev_lock_pos(dev_type); |
| lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], |
| netdev_lock_name[i]); |
| } |
| |
| static inline void netdev_set_addr_lockdep_class(struct net_device *dev) |
| { |
| int i; |
| |
| i = netdev_lock_pos(dev->type); |
| lockdep_set_class_and_name(&dev->addr_list_lock, |
| &netdev_addr_lock_key[i], |
| netdev_lock_name[i]); |
| } |
| #else |
| static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, |
| unsigned short dev_type) |
| { |
| } |
| static inline void netdev_set_addr_lockdep_class(struct net_device *dev) |
| { |
| } |
| #endif |
| |
| /******************************************************************************* |
| |
| Protocol management and registration routines |
| |
| *******************************************************************************/ |
| |
| /* |
| * Add a protocol ID to the list. Now that the input handler is |
| * smarter we can dispense with all the messy stuff that used to be |
| * here. |
| * |
| * BEWARE!!! Protocol handlers, mangling input packets, |
| * MUST BE last in hash buckets and checking protocol handlers |
| * MUST start from promiscuous ptype_all chain in net_bh. |
| * It is true now, do not change it. |
| * Explanation follows: if protocol handler, mangling packet, will |
| * be the first on list, it is not able to sense, that packet |
| * is cloned and should be copied-on-write, so that it will |
| * change it and subsequent readers will get broken packet. |
| * --ANK (980803) |
| */ |
| |
| static inline struct list_head *ptype_head(const struct packet_type *pt) |
| { |
| if (pt->type == htons(ETH_P_ALL)) |
| return pt->dev ? &pt->dev->ptype_all : &ptype_all; |
| else |
| return pt->dev ? &pt->dev->ptype_specific : |
| &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; |
| } |
| |
| /** |
| * dev_add_pack - add packet handler |
| * @pt: packet type declaration |
| * |
| * Add a protocol handler to the networking stack. The passed &packet_type |
| * is linked into kernel lists and may not be freed until it has been |
| * removed from the kernel lists. |
| * |
| * This call does not sleep, therefore it cannot guarantee |
| * that all CPUs that are in the middle of receiving packets |
| * will see the new packet type (until the next received packet). |
| */ |
| |
| void dev_add_pack(struct packet_type *pt) |
| { |
| struct list_head *head = ptype_head(pt); |
| |
| spin_lock(&ptype_lock); |
| list_add_rcu(&pt->list, head); |
| spin_unlock(&ptype_lock); |
| } |
| EXPORT_SYMBOL(dev_add_pack); |
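| |
| /* |
| * A minimal sketch of a dev_add_pack() user; the example_* names are |
| * hypothetical, but the shape matches real users such as the IP |
| * stack's handler for ETH_P_IP: |
| * |
| *	static int example_rcv(struct sk_buff *skb, struct net_device *dev, |
| *			       struct packet_type *pt, |
| *			       struct net_device *orig_dev) |
| *	{ |
| *		kfree_skb(skb); |
| *		return NET_RX_SUCCESS; |
| *	} |
| * |
| *	static struct packet_type example_pt __read_mostly = { |
| *		.type = cpu_to_be16(ETH_P_IP), |
| *		.func = example_rcv, |
| *	}; |
| * |
| *	dev_add_pack(&example_pt); |
| */ |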
| |
| /** |
| * __dev_remove_pack - remove packet handler |
| * @pt: packet type declaration |
| * |
| * Remove a protocol handler that was previously added to the kernel |
| * protocol handlers by dev_add_pack(). The passed &packet_type is removed |
| * from the kernel lists and can be freed or reused once this function |
| * returns. |
| * |
| * The packet type might still be in use by receivers |
| * and must not be freed until after all CPUs have gone |
| * through a quiescent state. |
| */ |
| void __dev_remove_pack(struct packet_type *pt) |
| { |
| struct list_head *head = ptype_head(pt); |
| struct packet_type *pt1; |
| |
| spin_lock(&ptype_lock); |
| |
| list_for_each_entry(pt1, head, list) { |
| if (pt == pt1) { |
| list_del_rcu(&pt->list); |
| goto out; |
| } |
| } |
| |
| pr_warn("dev_remove_pack: %p not found\n", pt); |
| out: |
| spin_unlock(&ptype_lock); |
| } |
| EXPORT_SYMBOL(__dev_remove_pack); |
| |
| /** |
| * dev_remove_pack - remove packet handler |
| * @pt: packet type declaration |
| * |
| * Remove a protocol handler that was previously added to the kernel |
| * protocol handlers by dev_add_pack(). The passed &packet_type is removed |
| * from the kernel lists and can be freed or reused once this function |
| * returns. |
| * |
| * This call sleeps to guarantee that no CPU is looking at the packet |
| * type after return. |
| */ |
| void dev_remove_pack(struct packet_type *pt) |
| { |
| __dev_remove_pack(pt); |
| |
| synchronize_net(); |
| } |
| EXPORT_SYMBOL(dev_remove_pack); |
| |
| |
| /** |
| * dev_add_offload - register offload handlers |
| * @po: protocol offload declaration |
| * |
| * Add protocol offload handlers to the networking stack. The passed |
| * &proto_offload is linked into kernel lists and may not be freed until |
| * it has been removed from the kernel lists. |
| * |
| * This call does not sleep, therefore it cannot guarantee |
| * that all CPUs that are in the middle of receiving packets |
| * will see the new offload handlers (until the next received packet). |
| */ |
| void dev_add_offload(struct packet_offload *po) |
| { |
| struct packet_offload *elem; |
| |
| spin_lock(&offload_lock); |
| list_for_each_entry(elem, &offload_base, list) { |
| if (po->priority < elem->priority) |
| break; |
| } |
| list_add_rcu(&po->list, elem->list.prev); |
| spin_unlock(&offload_lock); |
| } |
| EXPORT_SYMBOL(dev_add_offload); |
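| |
| /* |
| * A sketch of the registration shape, comparable to what the IPv4 |
| * stack does for ETH_P_IP GRO/GSO; the example_* callback names are |
| * hypothetical: |
| * |
| *	static struct packet_offload example_offload __read_mostly = { |
| *		.type = cpu_to_be16(ETH_P_IP), |
| *		.callbacks = { |
| *			.gso_segment = example_gso_segment, |
| *			.gro_receive = example_gro_receive, |
| *			.gro_complete = example_gro_complete, |
| *		}, |
| *	}; |
| * |
| *	dev_add_offload(&example_offload); |
| */ |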
| |
| /** |
| * __dev_remove_offload - remove offload handler |
| * @po: packet offload declaration |
| * |
| * Remove a protocol offload handler that was previously added to the |
| * kernel offload handlers by dev_add_offload(). The passed &offload_type |
| * is removed from the kernel lists and can be freed or reused once this |
| * function returns. |
| * |
| * The packet type might still be in use by receivers |
| * and must not be freed until after all CPUs have gone |
| * through a quiescent state. |
| */ |
| static void __dev_remove_offload(struct packet_offload *po) |
| { |
| struct list_head *head = &offload_base; |
| struct packet_offload *po1; |
| |
| spin_lock(&offload_lock); |
| |
| list_for_each_entry(po1, head, list) { |
| if (po == po1) { |
| list_del_rcu(&po->list); |
| goto out; |
| } |
| } |
| |
| pr_warn("dev_remove_offload: %p not found\n", po); |
| out: |
| spin_unlock(&offload_lock); |
| } |
| |
| /** |
| * dev_remove_offload - remove packet offload handler |
| * @po: packet offload declaration |
| * |
| * Remove a packet offload handler that was previously added to the kernel |
| * offload handlers by dev_add_offload(). The passed &offload_type is |
| * removed from the kernel lists and can be freed or reused once this |
| * function returns. |
| * |
| * This call sleeps to guarantee that no CPU is looking at the packet |
| * type after return. |
| */ |
| void dev_remove_offload(struct packet_offload *po) |
| { |
| __dev_remove_offload(po); |
| |
| synchronize_net(); |
| } |
| EXPORT_SYMBOL(dev_remove_offload); |
| |
| /****************************************************************************** |
| |
| Device Boot-time Settings Routines |
| |
| *******************************************************************************/ |
| |
| /* Boot time configuration table */ |
| static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; |
| |
| /** |
| * netdev_boot_setup_add - add new setup entry |
| * @name: name of the device |
| * @map: configured settings for the device |
| * |
| * Adds a new setup entry to the dev_boot_setup list. The function |
| * returns 0 on error and 1 on success. This is a generic routine |
| * for all netdevices. |
| */ |
| static int netdev_boot_setup_add(char *name, struct ifmap *map) |
| { |
| struct netdev_boot_setup *s; |
| int i; |
| |
| s = dev_boot_setup; |
| for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { |
| if (s[i].name[0] == '\0' || s[i].name[0] == ' ') { |
| memset(s[i].name, 0, sizeof(s[i].name)); |
| strlcpy(s[i].name, name, IFNAMSIZ); |
| memcpy(&s[i].map, map, sizeof(s[i].map)); |
| break; |
| } |
| } |
| |
| return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1; |
| } |
| |
| /** |
| * netdev_boot_setup_check - check boot time settings |
| * @dev: the netdevice |
| * |
| * Check boot time settings for the device. |
| * Any settings found are applied to the device for use |
| * later during device probing. |
| * Returns 0 if no settings are found, 1 if they are. |
| */ |
| int netdev_boot_setup_check(struct net_device *dev) |
| { |
| struct netdev_boot_setup *s = dev_boot_setup; |
| int i; |
| |
| for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { |
| if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && |
| !strcmp(dev->name, s[i].name)) { |
| dev->irq = s[i].map.irq; |
| dev->base_addr = s[i].map.base_addr; |
| dev->mem_start = s[i].map.mem_start; |
| dev->mem_end = s[i].map.mem_end; |
| return 1; |
| } |
| } |
| return 0; |
| } |
| EXPORT_SYMBOL(netdev_boot_setup_check); |
| |
| |
| /** |
| * netdev_boot_base - get address from boot time settings |
| * @prefix: prefix for network device |
| * @unit: id for network device |
| * |
| * Check boot time settings for the base address of the device. |
| * Returns 1 if the device is already registered (so it should not |
| * be probed), the configured base address if settings are found, |
| * and 0 otherwise. |
| */ |
| unsigned long netdev_boot_base(const char *prefix, int unit) |
| { |
| const struct netdev_boot_setup *s = dev_boot_setup; |
| char name[IFNAMSIZ]; |
| int i; |
| |
| sprintf(name, "%s%d", prefix, unit); |
| |
| /* |
| * If the device is already registered, return a base of 1 |
| * to indicate that this interface should not be probed |
| */ |
| if (__dev_get_by_name(&init_net, name)) |
| return 1; |
| |
| for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) |
| if (!strcmp(name, s[i].name)) |
| return s[i].map.base_addr; |
| return 0; |
| } |
| |
| /* |
| * Saves at boot time configured settings for any netdevice. |
| */ |
| int __init netdev_boot_setup(char *str) |
| { |
| int ints[5]; |
| struct ifmap map; |
| |
| str = get_options(str, ARRAY_SIZE(ints), ints); |
| if (!str || !*str) |
| return 0; |
| |
| /* Save settings */ |
| memset(&map, 0, sizeof(map)); |
| if (ints[0] > 0) |
| map.irq = ints[1]; |
| if (ints[0] > 1) |
| map.base_addr = ints[2]; |
| if (ints[0] > 2) |
| map.mem_start = ints[3]; |
| if (ints[0] > 3) |
| map.mem_end = ints[4]; |
| |
| /* Add new entry to the list */ |
| return netdev_boot_setup_add(str, &map); |
| } |
| |
| __setup("netdev=", netdev_boot_setup); |
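| |
| /* |
| * For example (illustrative values), booting with |
| * "netdev=5,0x340,0,0,eth1" stores irq 5 and base address 0x340 for |
| * eth1; netdev_boot_setup_check() later copies those values into the |
| * matching device before probing. |
| */ |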
| |
| /******************************************************************************* |
| |
| Device Interface Subroutines |
| |
| *******************************************************************************/ |
| |
| /** |
| * dev_get_iflink - get 'iflink' value of an interface |
| * @dev: targeted interface |
| * |
| * Indicates the ifindex the interface is linked to. |
| * Physical interfaces have the same 'ifindex' and 'iflink' values. |
| */ |
| |
| int dev_get_iflink(const struct net_device *dev) |
| { |
| if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) |
| return dev->netdev_ops->ndo_get_iflink(dev); |
| |
| return dev->ifindex; |
| } |
| EXPORT_SYMBOL(dev_get_iflink); |
| |
| /** |
| * dev_fill_metadata_dst - Retrieve tunnel egress information. |
| * @dev: targeted interface |
| * @skb: The packet. |
| * |
| * For better visibility of tunnel traffic, OVS needs to retrieve |
| * the egress tunnel information for a packet. This API lets the |
| * caller obtain that information. |
| */ |
| int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) |
| { |
| struct ip_tunnel_info *info; |
| |
| if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst) |
| return -EINVAL; |
| |
| info = skb_tunnel_info_unclone(skb); |
| if (!info) |
| return -ENOMEM; |
| if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX))) |
| return -EINVAL; |
| |
| return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb); |
| } |
| EXPORT_SYMBOL_GPL(dev_fill_metadata_dst); |
| |
| /** |
| * __dev_get_by_name - find a device by its name |
| * @net: the applicable net namespace |
| * @name: name to find |
| * |
| * Find an interface by name. Must be called under RTNL semaphore |
| * or @dev_base_lock. If the name is found a pointer to the device |
| * is returned. If the name is not found then %NULL is returned. The |
| * reference counters are not incremented so the caller must be |
| * careful with locks. |
| */ |
| |
| struct net_device *__dev_get_by_name(struct net *net, const char *name) |
| { |
| struct net_device *dev; |
| struct hlist_head *head = dev_name_hash(net, name); |
| |
| hlist_for_each_entry(dev, head, name_hlist) |
| if (!strncmp(dev->name, name, IFNAMSIZ)) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(__dev_get_by_name); |
| |
| /** |
| * dev_get_by_name_rcu - find a device by its name |
| * @net: the applicable net namespace |
| * @name: name to find |
| * |
| * Find an interface by name. |
| * If the name is found a pointer to the device is returned. |
| * If the name is not found then %NULL is returned. |
| * The reference counters are not incremented so the caller must be |
| * careful with locks. The caller must hold RCU lock. |
| */ |
| |
| struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) |
| { |
| struct net_device *dev; |
| struct hlist_head *head = dev_name_hash(net, name); |
| |
| hlist_for_each_entry_rcu(dev, head, name_hlist) |
| if (!strncmp(dev->name, name, IFNAMSIZ)) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(dev_get_by_name_rcu); |
| |
| /** |
| * dev_get_by_name - find a device by its name |
| * @net: the applicable net namespace |
| * @name: name to find |
| * |
| * Find an interface by name. This can be called from any |
| * context and does its own locking. The returned handle has |
| * the usage count incremented and the caller must use dev_put() to |
| * release it when it is no longer needed. %NULL is returned if no |
| * matching device is found. |
| */ |
| |
| struct net_device *dev_get_by_name(struct net *net, const char *name) |
| { |
| struct net_device *dev; |
| |
| rcu_read_lock(); |
| dev = dev_get_by_name_rcu(net, name); |
| if (dev) |
| dev_hold(dev); |
| rcu_read_unlock(); |
| return dev; |
| } |
| EXPORT_SYMBOL(dev_get_by_name); |
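| |
| /* |
| * A sketch of the hold/put discipline dev_get_by_name() imposes on |
| * callers (the device name is illustrative): |
| * |
| *	struct net_device *dev = dev_get_by_name(net, "eth0"); |
| * |
| *	if (dev) { |
| *		... use dev ... |
| *		dev_put(dev); |
| *	} |
| */ |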
| |
| /** |
| * __dev_get_by_index - find a device by its ifindex |
| * @net: the applicable net namespace |
| * @ifindex: index of device |
| * |
| * Search for an interface by index. Returns a pointer to the device, |
| * or %NULL if it is not found. The device has not had its reference |
| * counter increased so the caller must be careful about locking. The |
| * caller must hold either the RTNL semaphore or @dev_base_lock. |
| */ |
| |
| struct net_device *__dev_get_by_index(struct net *net, int ifindex) |
| { |
| struct net_device *dev; |
| struct hlist_head *head = dev_index_hash(net, ifindex); |
| |
| hlist_for_each_entry(dev, head, index_hlist) |
| if (dev->ifindex == ifindex) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(__dev_get_by_index); |
| |
| /** |
| * dev_get_by_index_rcu - find a device by its ifindex |
| * @net: the applicable net namespace |
| * @ifindex: index of device |
| * |
| * Search for an interface by index. Returns a pointer to the device, |
| * or %NULL if it is not found. The device has not had its reference |
| * counter increased so the caller must be careful about locking. The |
| * caller must hold the RCU lock. |
| */ |
| |
| struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) |
| { |
| struct net_device *dev; |
| struct hlist_head *head = dev_index_hash(net, ifindex); |
| |
| hlist_for_each_entry_rcu(dev, head, index_hlist) |
| if (dev->ifindex == ifindex) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(dev_get_by_index_rcu); |
| |
| |
| /** |
| * dev_get_by_index - find a device by its ifindex |
| * @net: the applicable net namespace |
| * @ifindex: index of device |
| * |
| * Search for an interface by index. Returns a pointer to the device, |
| * or %NULL if it is not found. The device returned has had a |
| * reference added and the pointer is safe until the user calls |
| * dev_put to indicate they have finished with it. |
| */ |
| |
| struct net_device *dev_get_by_index(struct net *net, int ifindex) |
| { |
| struct net_device *dev; |
| |
| rcu_read_lock(); |
| dev = dev_get_by_index_rcu(net, ifindex); |
| if (dev) |
| dev_hold(dev); |
| rcu_read_unlock(); |
| return dev; |
| } |
| EXPORT_SYMBOL(dev_get_by_index); |
| |
| /** |
| * netdev_get_name - get a netdevice name, knowing its ifindex. |
| * @net: network namespace |
| * @name: a pointer to the buffer where the name will be stored. |
| * @ifindex: the ifindex of the interface to get the name from. |
| * |
| * The use of raw_seqcount_begin() and cond_resched() before |
| * retrying is required as we want to give the writers a chance |
| * to complete when CONFIG_PREEMPT is not set. |
| */ |
| int netdev_get_name(struct net *net, char *name, int ifindex) |
| { |
| struct net_device *dev; |
| unsigned int seq; |
| |
| retry: |
| seq = raw_seqcount_begin(&devnet_rename_seq); |
| rcu_read_lock(); |
| dev = dev_get_by_index_rcu(net, ifindex); |
| if (!dev) { |
| rcu_read_unlock(); |
| return -ENODEV; |
| } |
| |
| strcpy(name, dev->name); |
| rcu_read_unlock(); |
| if (read_seqcount_retry(&devnet_rename_seq, seq)) { |
| cond_resched(); |
| goto retry; |
| } |
| |
| return 0; |
| } |
| |
| /** |
| * dev_getbyhwaddr_rcu - find a device by its hardware address |
| * @net: the applicable net namespace |
| * @type: media type of device |
| * @ha: hardware address |
| * |
| * Search for an interface by MAC address. Returns a pointer to the |
| * device, or %NULL if it is not found. |
| * The caller must hold RCU or RTNL. |
| * The returned device has not had its ref count increased |
| * and the caller must therefore be careful about locking. |
| * |
| */ |
| |
| struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, |
| const char *ha) |
| { |
| struct net_device *dev; |
| |
| for_each_netdev_rcu(net, dev) |
| if (dev->type == type && |
| !memcmp(dev->dev_addr, ha, dev->addr_len)) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(dev_getbyhwaddr_rcu); |
| |
| struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) |
| { |
| struct net_device *dev; |
| |
| ASSERT_RTNL(); |
| for_each_netdev(net, dev) |
| if (dev->type == type) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(__dev_getfirstbyhwtype); |
| |
| struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) |
| { |
| struct net_device *dev, *ret = NULL; |
| |
| rcu_read_lock(); |
| for_each_netdev_rcu(net, dev) |
| if (dev->type == type) { |
| dev_hold(dev); |
| ret = dev; |
| break; |
| } |
| rcu_read_unlock(); |
| return ret; |
| } |
| EXPORT_SYMBOL(dev_getfirstbyhwtype); |
| |
| /** |
| * __dev_get_by_flags - find any device with given flags |
| * @net: the applicable net namespace |
| * @if_flags: IFF_* values |
| * @mask: bitmask of bits in if_flags to check |
| * |
| * Search for any interface with the given flags. Returns a pointer |
| * to the first matching device, or %NULL if none is found. Must be |
| * called inside rtnl_lock(), and the result refcount is unchanged. |
| */ |
| |
| struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags, |
| unsigned short mask) |
| { |
| struct net_device *dev, *ret; |
| |
| ASSERT_RTNL(); |
| |
| ret = NULL; |
| for_each_netdev(net, dev) { |
| if (((dev->flags ^ if_flags) & mask) == 0) { |
| ret = dev; |
| break; |
| } |
| } |
| return ret; |
| } |
| EXPORT_SYMBOL(__dev_get_by_flags); |
| |
| /** |
| * dev_valid_name - check if name is okay for network device |
| * @name: name string |
| * |
| * Network device names need to be valid file names to |
| * allow sysfs to work. We also disallow any kind of |
| * whitespace. |
| */ |
| bool dev_valid_name(const char *name) |
| { |
| if (*name == '\0') |
| return false; |
| if (strlen(name) >= IFNAMSIZ) |
| return false; |
| if (!strcmp(name, ".") || !strcmp(name, "..")) |
| return false; |
| |
| while (*name) { |
| if (*name == '/' || *name == ':' || isspace(*name)) |
| return false; |
| name++; |
| } |
| return true; |
| } |
| EXPORT_SYMBOL(dev_valid_name); |
| |
| /** |
| * __dev_alloc_name - allocate a name for a device |
| * @net: network namespace to allocate the device name in |
| * @name: name format string |
| * @buf: scratch buffer and result name string |
| * |
| * Passed a format string, e.g. "lt%d", it will try to find a |
| * suitable id. It scans the list of devices to build up a free map, |
| * then chooses the first empty slot. The caller must hold the |
| * dev_base or rtnl lock while allocating the name and adding the |
| * device in order to avoid duplicates. |
| * Limited to bits_per_byte * page size devices (i.e. 32K on most |
| * platforms). |
| * Returns the number of the unit assigned or a negative errno code. |
| */ |
| |
| static int __dev_alloc_name(struct net *net, const char *name, char *buf) |
| { |
| int i = 0; |
| const char *p; |
| const int max_netdevices = 8*PAGE_SIZE; |
| unsigned long *inuse; |
| struct net_device *d; |
| |
| p = strnchr(name, IFNAMSIZ-1, '%'); |
| if (p) { |
| /* |
| * Verify the string as this thing may have come from |
| * the user. There must be either one "%d" and no other "%" |
| * characters. |
| */ |
| if (p[1] != 'd' || strchr(p + 2, '%')) |
| return -EINVAL; |
| |
| /* Use one page as a bit array of possible slots */ |
| inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); |
| if (!inuse) |
| return -ENOMEM; |
| |
| for_each_netdev(net, d) { |
| if (!sscanf(d->name, name, &i)) |
| continue; |
| if (i < 0 || i >= max_netdevices) |
| continue; |
| |
| /* avoid cases where sscanf is not an exact inverse of printf */ |
| snprintf(buf, IFNAMSIZ, name, i); |
| if (!strncmp(buf, d->name, IFNAMSIZ)) |
| set_bit(i, inuse); |
| } |
| |
| i = find_first_zero_bit(inuse, max_netdevices); |
| free_page((unsigned long) inuse); |
| } |
| |
| if (buf != name) |
| snprintf(buf, IFNAMSIZ, name, i); |
| if (!__dev_get_by_name(net, buf)) |
| return i; |
| |
| /* It is possible to run out of possible slots |
| * when the name is long and there isn't enough space left |
| * for the digits, or if all bits are used. |
| */ |
| return -ENFILE; |
| } |
| |
| /** |
| * dev_alloc_name - allocate a name for a device |
| * @dev: device |
| * @name: name format string |
| * |
| * Passed a format string, e.g. "lt%d", it will try to find a |
| * suitable id. It scans the list of devices to build up a free map, |
| * then chooses the first empty slot. The caller must hold the |
| * dev_base or rtnl lock while allocating the name and adding the |
| * device in order to avoid duplicates. |
| * Limited to bits_per_byte * page size devices (i.e. 32K on most |
| * platforms). |
| * Returns the number of the unit assigned or a negative errno code. |
| */ |
| |
| int dev_alloc_name(struct net_device *dev, const char *name) |
| { |
| char buf[IFNAMSIZ]; |
| struct net *net; |
| int ret; |
| |
| BUG_ON(!dev_net(dev)); |
| net = dev_net(dev); |
| ret = __dev_alloc_name(net, name, buf); |
| if (ret >= 0) |
| strlcpy(dev->name, buf, IFNAMSIZ); |
| return ret; |
| } |
| EXPORT_SYMBOL(dev_alloc_name); |
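| |
| /* |
| * A sketch of typical driver usage, under rtnl_lock(); on success |
| * dev->name holds the expanded name, e.g. "eth0": |
| * |
| *	err = dev_alloc_name(dev, "eth%d"); |
| *	if (err < 0) |
| *		goto fail; |
| */ |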
| |
| static int dev_alloc_name_ns(struct net *net, |
| struct net_device *dev, |
| const char *name) |
| { |
| char buf[IFNAMSIZ]; |
| int ret; |
| |
| ret = __dev_alloc_name(net, name, buf); |
| if (ret >= 0) |
| strlcpy(dev->name, buf, IFNAMSIZ); |
| return ret; |
| } |
| |
| static int dev_get_valid_name(struct net *net, |
| struct net_device *dev, |
| const char *name) |
| { |
| BUG_ON(!net); |
| |
| if (!dev_valid_name(name)) |
| return -EINVAL; |
| |
| if (strchr(name, '%')) |
| return dev_alloc_name_ns(net, dev, name); |
| else if (__dev_get_by_name(net, name)) |
| return -EEXIST; |
| else if (dev->name != name) |
| strlcpy(dev->name, name, IFNAMSIZ); |
| |
| return 0; |
| } |
| |
| /** |
| * dev_change_name - change name of a device |
| * @dev: device |
| * @newname: name (or format string) must be at least IFNAMSIZ |
| * |
| * Change the name of a device. A format string such as "eth%d" |
| * can be passed for wildcarding. |
| */ |
| int dev_change_name(struct net_device *dev, const char *newname) |
| { |
| unsigned char old_assign_type; |
| char oldname[IFNAMSIZ]; |
| int err = 0; |
| int ret; |
| struct net *net; |
| |
| ASSERT_RTNL(); |
| BUG_ON(!dev_net(dev)); |
| |
| net = dev_net(dev); |
| if (dev->flags & IFF_UP) |
| return -EBUSY; |
| |
| write_seqcount_begin(&devnet_rename_seq); |
| |
| if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { |
| write_seqcount_end(&devnet_rename_seq); |
| return 0; |
| } |
| |
| memcpy(oldname, dev->name, IFNAMSIZ); |
| |
| err = dev_get_valid_name(net, dev, newname); |
| if (err < 0) { |
| write_seqcount_end(&devnet_rename_seq); |
| return err; |
| } |
| |
| if (oldname[0] && !strchr(oldname, '%')) |
| netdev_info(dev, "renamed from %s\n", oldname); |
| |
| old_assign_type = dev->name_assign_type; |
| dev->name_assign_type = NET_NAME_RENAMED; |
| |
| rollback: |
| ret = device_rename(&dev->dev, dev->name); |
| if (ret) { |
| memcpy(dev->name, oldname, IFNAMSIZ); |
| dev->name_assign_type = old_assign_type; |
| write_seqcount_end(&devnet_rename_seq); |
| return ret; |
| } |
| |
| write_seqcount_end(&devnet_rename_seq); |
| |
| netdev_adjacent_rename_links(dev, oldname); |
| |
| write_lock_bh(&dev_base_lock); |
| hlist_del_rcu(&dev->name_hlist); |
| write_unlock_bh(&dev_base_lock); |
| |
| synchronize_rcu(); |
| |
| write_lock_bh(&dev_base_lock); |
| hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); |
| write_unlock_bh(&dev_base_lock); |
| |
| ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); |
| ret = notifier_to_errno(ret); |
| |
| if (ret) { |
| /* err >= 0 after dev_alloc_name() or stores the first errno */ |
| if (err >= 0) { |
| err = ret; |
| write_seqcount_begin(&devnet_rename_seq); |
| memcpy(dev->name, oldname, IFNAMSIZ); |
| memcpy(oldname, newname, IFNAMSIZ); |
| dev->name_assign_type = old_assign_type; |
| old_assign_type = NET_NAME_RENAMED; |
| goto rollback; |
| } else { |
| pr_err("%s: name change rollback failed: %d\n", |
| dev->name, ret); |
| } |
| } |
| |
| return err; |
| } |
| |
| /** |
| * dev_set_alias - change ifalias of a device |
| * @dev: device |
| * @alias: name up to IFALIASZ |
| * @len: limit of bytes to copy from @alias |
| * |
| * Set the ifalias for a device. |
| */ |
| int dev_set_alias(struct net_device *dev, const char *alias, size_t len) |
| { |
| char *new_ifalias; |
| |
| ASSERT_RTNL(); |
| |
| if (len >= IFALIASZ) |
| return -EINVAL; |
| |
| if (!len) { |
| kfree(dev->ifalias); |
| dev->ifalias = NULL; |
| return 0; |
| } |
| |
| new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); |
| if (!new_ifalias) |
| return -ENOMEM; |
| dev->ifalias = new_ifalias; |
| |
| strlcpy(dev->ifalias, alias, len+1); |
| return len; |
| } |
| |
| |
| /** |
| * netdev_features_change - device changes features |
| * @dev: device to cause notification |
| * |
| * Called to indicate a device has changed features. |
| */ |
| void netdev_features_change(struct net_device *dev) |
| { |
| call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); |
| } |
| EXPORT_SYMBOL(netdev_features_change); |
| |
| /** |
| * netdev_state_change - device changes state |
| * @dev: device to cause notification |
| * |
| * Called to indicate a device has changed state. This function calls |
| * the notifier chains for netdev_chain and sends a NEWLINK message |
| * to the routing socket. |
| */ |
| void netdev_state_change(struct net_device *dev) |
| { |
| if (dev->flags & IFF_UP) { |
| struct netdev_notifier_change_info change_info; |
| |
| change_info.flags_changed = 0; |
| call_netdevice_notifiers_info(NETDEV_CHANGE, dev, |
| &change_info.info); |
| rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); |
| } |
| } |
| EXPORT_SYMBOL(netdev_state_change); |
| |
| /** |
| * netdev_notify_peers - notify network peers about existence of @dev |
| * @dev: network device |
| * |
| * Generate traffic such that interested network peers are aware of |
| * @dev, such as by generating a gratuitous ARP. This may be used when |
| * a device wants to inform the rest of the network about some sort of |
| * reconfiguration such as a failover event or virtual machine |
| * migration. |
| */ |
| void netdev_notify_peers(struct net_device *dev) |
| { |
| rtnl_lock(); |
| call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); |
| rtnl_unlock(); |
| } |
| EXPORT_SYMBOL(netdev_notify_peers); |
| |
| static int __dev_open(struct net_device *dev) |
| { |
| const struct net_device_ops *ops = dev->netdev_ops; |
| int ret; |
| |
| ASSERT_RTNL(); |
| |
| if (!netif_device_present(dev)) |
| return -ENODEV; |
| |
| /* Block netpoll from trying to do any rx path servicing. |
| * If we don't do this there is a chance ndo_poll_controller |
| * or ndo_poll may be running while we open the device |
| */ |
| netpoll_poll_disable(dev); |
| |
| ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev); |
| ret = notifier_to_errno(ret); |
| if (ret) |
| return ret; |
| |
| set_bit(__LINK_STATE_START, &dev->state); |
| |
| if (ops->ndo_validate_addr) |
| ret = ops->ndo_validate_addr(dev); |
| |
| if (!ret && ops->ndo_open) |
| ret = ops->ndo_open(dev); |
| |
| netpoll_poll_enable(dev); |
| |
| if (ret) |
| clear_bit(__LINK_STATE_START, &dev->state); |
| else { |
| dev->flags |= IFF_UP; |
| dev_set_rx_mode(dev); |
| dev_activate(dev); |
| add_device_randomness(dev->dev_addr, dev->addr_len); |
| } |
| |
| return ret; |
| } |
| |
| /** |
| * dev_open - prepare an interface for use. |
| * @dev: device to open |
| * |
| * Takes a device from down to up state. The device's private open |
| * function is invoked and then the multicast lists are loaded. Finally |
| * the device is moved into the up state and a %NETDEV_UP message is |
| * sent to the netdev notifier chain. |
| * |
| * Calling this function on an active interface is a nop. On a failure |
| * a negative errno code is returned. |
| */ |
| int dev_open(struct net_device *dev) |
| { |
| int ret; |
| |
| if (dev->flags & IFF_UP) |
| return 0; |
| |
| ret = __dev_open(dev); |
| if (ret < 0) |
| return ret; |
| |
| rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); |
| call_netdevice_notifiers(NETDEV_UP, dev); |
| |
| return ret; |
| } |
| EXPORT_SYMBOL(dev_open); |
| |
| static int __dev_close_many(struct list_head *head) |
| { |
| struct net_device *dev; |
| |
| ASSERT_RTNL(); |
| might_sleep(); |
| |
| list_for_each_entry(dev, head, close_list) { |
| /* Temporarily disable netpoll until the interface is down */ |
| netpoll_poll_disable(dev); |
| |
| call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); |
| |
| clear_bit(__LINK_STATE_START, &dev->state); |
| |
| /* Synchronize to scheduled poll. We cannot touch the poll list; |
| * it may even be on a different cpu. So just clear netif_running(). |
| * |
| * dev->stop() will invoke napi_disable() on all of its |
| * napi_struct instances on this device. |
| */ |
| smp_mb__after_atomic(); /* Commit netif_running(). */ |
| } |
| |
| dev_deactivate_many(head); |
| |
| list_for_each_entry(dev, head, close_list) { |
| const struct net_device_ops *ops = dev->netdev_ops; |
| |
| /* |
| * Call the device specific close. This cannot fail and is |
| * only done if the device is UP. |
| * |
| * We allow it to be called even after a DETACH hot-plug |
| * event. |
| */ |
| if (ops->ndo_stop) |
| ops->ndo_stop(dev); |
| |
| dev->flags &= ~IFF_UP; |
| netpoll_poll_enable(dev); |
| } |
| |
| return 0; |
| } |
| |
| static int __dev_close(struct net_device *dev) |
| { |
| int retval; |
| LIST_HEAD(single); |
| |
| list_add(&dev->close_list, &single); |
| retval = __dev_close_many(&single); |
| list_del(&single); |
| |
| return retval; |
| } |
| |
| int dev_close_many(struct list_head *head, bool unlink) |
| { |
| struct net_device *dev, *tmp; |
| |
| /* Remove the devices that don't need to be closed */ |
| list_for_each_entry_safe(dev, tmp, head, close_list) |
| if (!(dev->flags & IFF_UP)) |
| list_del_init(&dev->close_list); |
| |
| __dev_close_many(head); |
| |
| list_for_each_entry_safe(dev, tmp, head, close_list) { |
| rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); |
| call_netdevice_notifiers(NETDEV_DOWN, dev); |
| if (unlink) |
| list_del_init(&dev->close_list); |
| } |
| |
| return 0; |
| } |
| EXPORT_SYMBOL(dev_close_many); |
| |
| /** |
| * dev_close - shutdown an interface. |
| * @dev: device to shutdown |
| * |
| * This function moves an active device into down state. A |
| * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device |
| * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier |
| * chain. |
| */ |
| int dev_close(struct net_device *dev) |
| { |
| if (dev->flags & IFF_UP) { |
| LIST_HEAD(single); |
| |
| list_add(&dev->close_list, &single); |
| dev_close_many(&single, true); |
| list_del(&single); |
| } |
| return 0; |
| } |
| EXPORT_SYMBOL(dev_close); |
| |
| |
| /** |
| * dev_disable_lro - disable Large Receive Offload on a device |
| * @dev: device |
| * |
| * Disable Large Receive Offload (LRO) on a net device. Must be |
| * called under RTNL. This is needed if received packets may be |
| * forwarded to another interface. |
| */ |
| void dev_disable_lro(struct net_device *dev) |
| { |
| struct net_device *lower_dev; |
| struct list_head *iter; |
| |
| dev->wanted_features &= ~NETIF_F_LRO; |
| netdev_update_features(dev); |
| |
| if (unlikely(dev->features & NETIF_F_LRO)) |
| netdev_WARN(dev, "failed to disable LRO!\n"); |
| |
| netdev_for_each_lower_dev(dev, lower_dev, iter) |
| dev_disable_lro(lower_dev); |
| } |
| EXPORT_SYMBOL(dev_disable_lro); |
| |
| static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, |
| struct net_device *dev) |
| { |
| struct netdev_notifier_info info; |
| |
| netdev_notifier_info_init(&info, dev); |
| return nb->notifier_call(nb, val, &info); |
| } |
| |
| static int dev_boot_phase = 1; |
| |
| /** |
| * register_netdevice_notifier - register a network notifier block |
| * @nb: notifier |
| * |
| * Register a notifier to be called when network device events occur. |
| * The notifier passed is linked into the kernel structures and must |
| * not be reused until it has been unregistered. A negative errno code |
| * is returned on a failure. |
| * |
| * When registered, all registration and up events are replayed |
| * to the new notifier so that it has a race-free view of the |
| * network device list. |
| */ |
| |
| int register_netdevice_notifier(struct notifier_block *nb) |
| { |
| struct net_device *dev; |
| struct net_device *last; |
| struct net *net; |
| int err; |
| |
| rtnl_lock(); |
| err = raw_notifier_chain_register(&netdev_chain, nb); |
| if (err) |
| goto unlock; |
| if (dev_boot_phase) |
| goto unlock; |
| for_each_net(net) { |
| for_each_netdev(net, dev) { |
| err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); |
| err = notifier_to_errno(err); |
| if (err) |
| goto rollback; |
| |
| if (!(dev->flags & IFF_UP)) |
| continue; |
| |
| call_netdevice_notifier(nb, NETDEV_UP, dev); |
| } |
| } |
| |
| unlock: |
| rtnl_unlock(); |
| return err; |
| |
| rollback: |
| last = dev; |
| for_each_net(net) { |
| for_each_netdev(net, dev) { |
| if (dev == last) |
| goto outroll; |
| |
| if (dev->flags & IFF_UP) { |
| call_netdevice_notifier(nb, NETDEV_GOING_DOWN, |
| dev); |
| call_netdevice_notifier(nb, NETDEV_DOWN, dev); |
| } |
| call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); |
| } |
| } |
| |
| outroll: |
| raw_notifier_chain_unregister(&netdev_chain, nb); |
| goto unlock; |
| } |
| EXPORT_SYMBOL(register_netdevice_notifier); |
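| |
| /* |
| * A minimal sketch of a notifier block; the example_* names are |
| * hypothetical: |
| * |
| *	static int example_event(struct notifier_block *nb, |
| *				 unsigned long event, void *ptr) |
| *	{ |
| *		struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
| * |
| *		if (event == NETDEV_UP) |
| *			pr_info("%s is up\n", dev->name); |
| *		return NOTIFY_DONE; |
| *	} |
| * |
| *	static struct notifier_block example_nb = { |
| *		.notifier_call = example_event, |
| *	}; |
| * |
| *	register_netdevice_notifier(&example_nb); |
| */ |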
| |
| /** |
| * unregister_netdevice_notifier - unregister a network notifier block |
| * @nb: notifier |
| * |
| * Unregister a notifier previously registered by |
| * register_netdevice_notifier(). The notifier is unlinked from the |
| * kernel structures and may then be reused. A negative errno code |
| * is returned on a failure. |
| * |
| * After unregistering, unregister and down device events are |
| * synthesized for all devices on the device list and sent to the |
| * removed notifier, removing the need for special-case cleanup code. |
| */ |
| |
| int unregister_netdevice_notifier(struct notifier_block *nb) |
| { |
| struct net_device *dev; |
| struct net *net; |
| int err; |
| |
| rtnl_lock(); |
| err = raw_notifier_chain_unregister(&netdev_chain, nb); |
| if (err) |
| goto unlock; |
| |
| for_each_net(net) { |
| for_each_netdev(net, dev) { |
| if (dev->flags & IFF_UP) { |
| call_netdevice_notifier(nb, NETDEV_GOING_DOWN, |
| dev); |
| call_netdevice_notifier(nb, NETDEV_DOWN, dev); |
| } |
| call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); |
| } |
| } |
| unlock: |
| rtnl_unlock(); |
| return err; |
| } |
| EXPORT_SYMBOL(unregister_netdevice_notifier); |
| |
| /** |
| * call_netdevice_notifiers_info - call all network notifier blocks |
| * @val: value passed unmodified to notifier function |
| * @dev: net_device pointer passed unmodified to notifier function |
| * @info: notifier information data |
| * |
| * Call all network notifier blocks. Parameters and return value |
| * are as for raw_notifier_call_chain(). |
| */ |
| |
| static int call_netdevice_notifiers_info(unsigned long val, |
| struct net_device *dev, |
| struct netdev_notifier_info *info) |
| { |
| ASSERT_RTNL(); |
| netdev_notifier_info_init(info, dev); |
| return raw_notifier_call_chain(&netdev_chain, val, info); |
| } |
| |
| /** |
| * call_netdevice_notifiers - call all network notifier blocks |
| * @val: value passed unmodified to notifier function |
| * @dev: net_device pointer passed unmodified to notifier function |
| * |
| * Call all network notifier blocks. Parameters and return value |
| * are as for raw_notifier_call_chain(). |
| */ |
| |
| int call_netdevice_notifiers(unsigned long val, struct net_device *dev) |
| { |
| struct netdev_notifier_info info; |
| |
| return call_netdevice_notifiers_info(val, dev, &info); |
| } |
| EXPORT_SYMBOL(call_netdevice_notifiers); |
| |
| #ifdef CONFIG_NET_INGRESS |
| static struct static_key ingress_needed __read_mostly; |
| |
| void net_inc_ingress_queue(void) |
| { |
| static_key_slow_inc(&ingress_needed); |
| } |
| EXPORT_SYMBOL_GPL(net_inc_ingress_queue); |
| |
| void net_dec_ingress_queue(void) |
| { |
| static_key_slow_dec(&ingress_needed); |
| } |
| EXPORT_SYMBOL_GPL(net_dec_ingress_queue); |
| #endif |
| |
| static struct static_key netstamp_needed __read_mostly; |
| #ifdef HAVE_JUMP_LABEL |
| /* We are not allowed to call static_key_slow_dec() from irq context |
| * If net_disable_timestamp() is called from irq context, defer the |
| * static_key_slow_dec() calls. |
| */ |
| static atomic_t netstamp_needed_deferred; |
| #endif |
| |
| void net_enable_timestamp(void) |
| { |
| #ifdef HAVE_JUMP_LABEL |
| int deferred = atomic_xchg(&netstamp_needed_deferred, 0); |
| |
| if (deferred) { |
| while (--deferred) |
| static_key_slow_dec(&netstamp_needed); |
| return; |
| } |
| #endif |
| static_key_slow_inc(&netstamp_needed); |
| } |
| EXPORT_SYMBOL(net_enable_timestamp); |
| |
| void net_disable_timestamp(void) |
| { |
| #ifdef HAVE_JUMP_LABEL |
| if (in_interrupt()) { |
| atomic_inc(&netstamp_needed_deferred); |
| return; |
| } |
| #endif |
| static_key_slow_dec(&netstamp_needed); |
| } |
| EXPORT_SYMBOL(net_disable_timestamp); |
| |
| static inline void net_timestamp_set(struct sk_buff *skb) |
| { |
| skb->tstamp.tv64 = 0; |
| if (static_key_false(&netstamp_needed)) |
| __net_timestamp(skb); |
| } |
| |
| #define net_timestamp_check(COND, SKB) \ |
| if (static_key_false(&netstamp_needed)) { \ |
| if ((COND) && !(SKB)->tstamp.tv64) \ |
| __net_timestamp(SKB); \ |
| } |
| |
| bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb) |
| { |
| unsigned int len; |
| |
| if (!(dev->flags & IFF_UP)) |
| return false; |
| |
| len = dev->mtu + dev->hard_header_len + VLAN_HLEN; |
| if (skb->len <= len) |
| return true; |
| |
| /* if TSO is enabled, we don't care about the length as the packet |
| * could be forwarded without being segmented first |
| */ |
| if (skb_is_gso(skb)) |
| return true; |
| |
| return false; |
| } |
| EXPORT_SYMBOL_GPL(is_skb_forwardable); |
| |
| int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) |
| { |
| if (skb_orphan_frags(skb, GFP_ATOMIC) || |
| unlikely(!is_skb_forwardable(dev, skb))) { |
| atomic_long_inc(&dev->rx_dropped); |
| kfree_skb(skb); |
| return NET_RX_DROP; |
| } |
| |
| skb_scrub_packet(skb, true); |
| skb->priority = 0; |
| skb->protocol = eth_type_trans(skb, dev); |
| skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); |
| |
| return 0; |
| } |
| EXPORT_SYMBOL_GPL(__dev_forward_skb); |
| |
| /** |
| * dev_forward_skb - loopback an skb to another netif |
| * |
| * @dev: destination network device |
| * @skb: buffer to forward |
| * |
| * return values: |
| * NET_RX_SUCCESS (no congestion) |
| * NET_RX_DROP (packet was dropped, but freed) |
| * |
| * dev_forward_skb can be used for injecting an skb from the |
| * start_xmit function of one device into the receive queue |
| * of another device. |
| * |
| * The receiving device may be in another namespace, so |
| * we have to clear all information in the skb that could |
| * impact namespace isolation. |
| */ |
| int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) |
| { |
| return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); |
| } |
| EXPORT_SYMBOL_GPL(dev_forward_skb); |
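| |
| /* |
| * A sketch of the start_xmit injection described above; example_xmit() |
| * and example_get_peer() are hypothetical: |
| * |
| *	static netdev_tx_t example_xmit(struct sk_buff *skb, |
| *					struct net_device *dev) |
| *	{ |
| *		struct net_device *peer = example_get_peer(dev); |
| * |
| *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS) |
| *			dev->stats.tx_dropped++; |
| *		return NETDEV_TX_OK; |
| *	} |
| */ |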
| |
| static inline int deliver_skb(struct sk_buff *skb, |
| struct packet_type *pt_prev, |
| struct net_device *orig_dev) |
| { |
| if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) |
| return -ENOMEM; |
| atomic_inc(&skb->users); |
| return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); |
| } |
| |
| static inline void deliver_ptype_list_skb(struct sk_buff *skb, |
| struct packet_type **pt, |
| struct net_device *orig_dev, |
| __be16 type, |
| struct list_head *ptype_list) |
| { |
| struct packet_type *ptype, *pt_prev = *pt; |
| |
| list_for_each_entry_rcu(ptype, ptype_list, list) { |
| if (ptype->type != type) |
| continue; |
| if (pt_prev) |
| deliver_skb(skb, pt_prev, orig_dev); |
| pt_prev = ptype; |
| } |
| *pt = pt_prev; |
| } |
| |
| static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) |
| { |
| if (!ptype->af_packet_priv || !skb->sk) |
| return false; |
| |
| if (ptype->id_match) |
| return ptype->id_match(ptype, skb->sk); |
| else if ((struct sock *)ptype->af_packet_priv == skb->sk) |
| return true; |
| |
| return false; |
| } |
| |
| /* |
| * Support routine. Sends outgoing frames to any network |
| * taps currently in use. |
| */ |
| |
| static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) |
| { |
| struct packet_type *ptype; |
| struct sk_buff *skb2 = NULL; |
| struct packet_type *pt_prev = NULL; |
| struct list_head *ptype_list = &ptype_all; |
| |
| rcu_read_lock(); |
| again: |
| list_for_each_entry_rcu(ptype, ptype_list, list) { |
| /* Never send packets back to the socket |
| * they originated from - MvS (miquels@drinkel.ow.org) |
| */ |
| if (skb_loop_sk(ptype, skb)) |
| continue; |
| |
| if (pt_prev) { |
| deliver_skb(skb2, pt_prev, skb->dev); |
| pt_prev = ptype; |
| continue; |
| } |
| |
| /* need to clone skb, done only once */ |
| skb2 = skb_clone(skb, GFP_ATOMIC); |
| if (!skb2) |
| goto out_unlock; |
| |
| net_timestamp_set(skb2); |
| |
| /* The network header should be correctly set by the |
| * sender, so the check below is just protection |
| * against buggy protocols. |
| */ |
| skb_reset_mac_header(skb2); |
| |
| if (skb_network_header(skb2) < skb2->data || |
| skb_network_header(skb2) > skb_tail_pointer(skb2)) { |
| net_crit_ratelimited("protocol %04x is buggy, dev %s\n", |
| ntohs(skb2->protocol), |
| dev->name); |
| skb_reset_network_header(skb2); |
| } |
| |
| skb2->transport_header = skb2->network_header; |
| skb2->pkt_type = PACKET_OUTGOING; |
| pt_prev = ptype; |
| } |
| |
| if (ptype_list == &ptype_all) { |
| ptype_list = &dev->ptype_all; |
| goto again; |
| } |
| out_unlock: |
| if (pt_prev) |
| pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); |
| rcu_read_unlock(); |
| } |
| |
| /** |
| * netif_setup_tc - Handle tc mappings on real_num_tx_queues change |
| * @dev: Network device |
| * @txq: number of queues available |
| * |
| * If real_num_tx_queues is changed the tc mappings may no longer be |
| * valid. To resolve this, verify that each tc mapping remains valid |
| * and, if not, zero the mapping. With no priorities mapping to a |
| * stale offset/count pair, that pair will no longer be used. In the |
| * worst case, if TC0 is invalid, nothing can be done, so priority |
| * mappings are disabled entirely. It is expected that drivers will |
| * fix this mapping if they can before calling |
| * netif_set_real_num_tx_queues. |
| */ |
| static void netif_setup_tc(struct net_device *dev, unsigned int txq) |
| { |
| int i; |
| struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; |
| |
| /* If TC0 is invalidated disable TC mapping */ |
| if (tc->offset + tc->count > txq) { |
| pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); |
| dev->num_tc = 0; |
| return; |
| } |
| |
| /* Invalidated prio to tc mappings set to TC0 */ |
| for (i = 1; i < TC_BITMASK + 1; i++) { |
| int q = netdev_get_prio_tc_map(dev, i); |
| |
| tc = &dev->tc_to_txq[q]; |
| if (tc->offset + tc->count > txq) { |
| pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n", |
| i, q); |
| netdev_set_prio_tc_map(dev, i, 0); |
| } |
| } |
| } |
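| |
| /* Illustrative sketch: how a driver would establish the mappings that |
| * netif_setup_tc() validates above. Two traffic classes over eight |
| * queues; the exact counts are hypothetical. |
| * |
| *	netdev_set_num_tc(dev, 2); |
| *	netdev_set_tc_queue(dev, 0, 4, 0);	(TC0: queues 0-3) |
| *	netdev_set_tc_queue(dev, 1, 4, 4);	(TC1: queues 4-7) |
| *	netdev_set_prio_tc_map(dev, 0, 0);	(priority 0 -> TC0) |
| *	netdev_set_prio_tc_map(dev, 1, 1);	(priority 1 -> TC1) |
| * |
| * If real_num_tx_queues later drops below 8, the TC1 offset/count pair |
| * (4, 4) no longer fits and netif_setup_tc() resets priority 1 to TC0. |
| */ |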
| |
| #ifdef CONFIG_XPS |
| static DEFINE_MUTEX(xps_map_mutex); |
| #define xmap_dereference(P) \ |
| rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) |
| |
| static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps, |
| int cpu, u16 index) |
| { |
| struct xps_map *map = NULL; |
| int pos; |
| |
| if (dev_maps) |
| map = xmap_dereference(dev_maps->cpu_map[cpu]); |
| |
| for (pos = 0; map && pos < map->len; pos++) { |
| if (map->queues[pos] == index) { |
| if (map->len > 1) { |
| map->queues[pos] = map->queues[--map->len]; |
| } else { |
| RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL); |
| kfree_rcu(map, rcu); |
| map = NULL; |
| } |
| break; |
| } |
| } |
| |
| return map; |
| } |
| |
| static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) |
| { |
| struct xps_dev_maps *dev_maps; |
| int cpu, i; |
| bool active = false; |
| |
| mutex_lock(&xps_map_mutex); |
| dev_maps = xmap_dereference(dev->xps_maps); |
| |
| if (!dev_maps) |
| goto out_no_maps; |
| |
| for_each_possible_cpu(cpu) { |
| for (i = index; i < dev->num_tx_queues; i++) { |
| if (!remove_xps_queue(dev_maps, cpu, i)) |
| break; |
| } |
| if (i == dev->num_tx_queues) |
| active = true; |
| } |
| |
| if (!active) { |
| RCU_INIT_POINTER(dev->xps_maps, NULL); |
| kfree_rcu(dev_maps, rcu); |
| } |
| |
| for (i = index; i < dev->num_tx_queues; i++) |
| netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i), |
| NUMA_NO_NODE); |
| |
| out_no_maps: |
| mutex_unlock(&xps_map_mutex); |
| } |
| |
| static struct xps_map *expand_xps_map(struct xps_map *map, |
| int cpu, u16 index) |
| { |
| struct xps_map *new_map; |
| int alloc_len = XPS_MIN_MAP_ALLOC; |
| int i, pos; |
| |
| for (pos = 0; map && pos < map->len; pos++) { |
| if (map->queues[pos] != index) |
| continue; |
| return map; |
| } |
| |
| /* Need to add queue to this CPU's existing map */ |
| if (map) { |
| if (pos < map->alloc_len) |
| return map; |
| |
| alloc_len = map->alloc_len * 2; |
| } |
| |
| /* Need to allocate a new map to store this queue on this CPU */ |
| new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, |
| cpu_to_node(cpu)); |
| if (!new_map) |
| return NULL; |
| |
| for (i = 0; i < pos; i++) |
| new_map->queues[i] = map->queues[i]; |
| new_map->alloc_len = alloc_len; |
| new_map->len = pos; |
| |
| return new_map; |
| } |
| |
| int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, |
| u16 index) |
| { |
| struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; |
| struct xps_map *map, *new_map; |
| int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES); |
| int cpu, numa_node_id = -2; |
| bool active = false; |
| |
| mutex_lock(&xps_map_mutex); |
| |
| dev_maps = xmap_dereference(dev->xps_maps); |
| |
| /* allocate memory for queue storage */ |
| for_each_online_cpu(cpu) { |
| if (!cpumask_test_cpu(cpu, mask)) |
| continue; |
| |
| if (!new_dev_maps) |
| new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); |
| if (!new_dev_maps) { |
| mutex_unlock(&xps_map_mutex); |
| return -ENOMEM; |
| } |
| |
| map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) : |
| NULL; |
| |
| map = expand_xps_map(map, cpu, index); |
| if (!map) |
| goto error; |
| |
| RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map); |
| } |
| |
| if (!new_dev_maps) |
| goto out_no_new_maps; |
| |
| for_each_possible_cpu(cpu) { |
| if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) { |
| /* add queue to CPU maps */ |
| int pos = 0; |
| |
| map = xmap_dereference(new_dev_maps->cpu_map[cpu]); |
| while ((pos < map->len) && (map->queues[pos] != index)) |
| pos++; |
| |
| if (pos == map->len) |
| map->queues[map->len++] = index; |
| #ifdef CONFIG_NUMA |
| if (numa_node_id == -2) |
| numa_node_id = cpu_to_node(cpu); |
| else if (numa_node_id != cpu_to_node(cpu)) |
| numa_node_id = -1; |
| #endif |
| } else if (dev_maps) { |
| /* fill in the new device map from the old device map */ |
| map = xmap_dereference(dev_maps->cpu_map[cpu]); |
| RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map); |
| } |
| |
| } |
| |
| rcu_assign_pointer(dev->xps_maps, new_dev_maps); |
| |
| /* Cleanup old maps */ |
| if (dev_maps) { |
| for_each_possible_cpu(cpu) { |
| new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]); |
| map = xmap_dereference(dev_maps->cpu_map[cpu]); |
| if (map && map != new_map) |
| kfree_rcu(map, rcu); |
| } |
| |
| kfree_rcu(dev_maps, rcu); |
| } |
| |
| dev_maps = new_dev_maps; |
| active = true; |
| |
| out_no_new_maps: |
| /* update Tx queue numa node */ |
| netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), |
| (numa_node_id >= 0) ? numa_node_id : |
| NUMA_NO_NODE); |
| |
| if (!dev_maps) |
| goto out_no_maps; |
| |
| /* Remove the queue from any CPUs no longer in the mask */ |
| for_each_possible_cpu(cpu) { |
| if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) |
| continue; |
| |
| if (remove_xps_queue(dev_maps, cpu, index)) |
| active = true; |
| } |
| |
| /* free map if not active */ |
| if (!active) { |
| RCU_INIT_POINTER(dev->xps_maps, NULL); |
| kfree_rcu(dev_maps, rcu); |
| } |
| |
| out_no_maps: |
| mutex_unlock(&xps_map_mutex); |
| |
| return 0; |
| error: |
| /* remove any maps that we added */ |
| for_each_possible_cpu(cpu) { |
| new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]); |
| map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) : |
| NULL; |
| if (new_map && new_map != map) |
| kfree(new_map); |
| } |
| |
| mutex_unlock(&xps_map_mutex); |
| |
| kfree(new_dev_maps); |
| return -ENOMEM; |
| } |
| EXPORT_SYMBOL(netif_set_xps_queue); |
| |
| #endif |
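| |
| /* Illustrative sketch: the XPS maps managed above are normally written |
| * from user space, e.g. |
| * |
| *	echo 0f > /sys/class/net/eth0/queues/tx-0/xps_cpus |
| * |
| * but a driver may also pin a queue to CPUs directly (the interface |
| * name, queue index and mask below are hypothetical): |
| * |
| *	cpumask_var_t mask; |
| * |
| *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) { |
| *		cpumask_set_cpu(0, mask); |
| *		cpumask_set_cpu(1, mask); |
| *		netif_set_xps_queue(dev, mask, 0);	(tx-0 -> CPUs 0,1) |
| *		free_cpumask_var(mask); |
| *	} |
| */ |
| |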
| /* |
| * Routine to help set real_num_tx_queues. To avoid leaving stale skbs |
| * on queues greater than real_num_tx_queues, those qdiscs must be |
| * flushed. |
| */ |
| int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) |
| { |
| int rc; |
| |
| if (txq < 1 || txq > dev->num_tx_queues) |
| return -EINVAL; |
| |
| if (dev->reg_state == NETREG_REGISTERED || |
| dev->reg_state == NETREG_UNREGISTERING) { |
| ASSERT_RTNL(); |
| |
| rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, |
| txq); |
| if (rc) |
| return rc; |
| |
| if (dev->num_tc) |
| netif_setup_tc(dev, txq); |
| |
| if (txq < dev->real_num_tx_queues) { |
| qdisc_reset_all_tx_gt(dev, txq); |
| #ifdef CONFIG_XPS |
| netif_reset_xps_queues_gt(dev, txq); |
| #endif |
| } |
| } |
| |
| dev->real_num_tx_queues = txq; |
| return 0; |
| } |
| EXPORT_SYMBOL(netif_set_real_num_tx_queues); |
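| |
| /* Illustrative sketch: a driver resizing its active queue set after a |
| * channel reconfiguration (new_txq and new_rxq are hypothetical). Once |
| * the device is registered this must run under the rtnl lock: |
| * |
| *	rtnl_lock(); |
| *	err = netif_set_real_num_tx_queues(netdev, new_txq); |
| *	if (!err) |
| *		err = netif_set_real_num_rx_queues(netdev, new_rxq); |
| *	rtnl_unlock(); |
| */ |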
| |
| #ifdef CONFIG_SYSFS |
| /** |
| * netif_set_real_num_rx_queues - set actual number of RX queues used |
| * @dev: Network device |
| * @rxq: Actual number of RX queues |
| * |
| * This must be called either with the rtnl_lock held or before |
| * registration of the net device. Returns 0 on success, or a |
| * negative error code. If called before registration, it always |
| * succeeds. |
| */ |
| int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) |
| { |
| int rc; |
| |
| if (rxq < 1 || rxq > dev->num_rx_queues) |
| return -EINVAL; |
| |
| if (dev->reg_state == NETREG_REGISTERED) { |
| ASSERT_RTNL(); |
| |
| rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, |
| rxq); |
| if (rc) |
| return rc; |
| } |
| |
| dev->real_num_rx_queues = rxq; |
| return 0; |
| } |
| EXPORT_SYMBOL(netif_set_real_num_rx_queues); |
| #endif |
| |
| /** |
| * netif_get_num_default_rss_queues - default number of RSS queues |
| * |
| * This routine should set an upper limit on the number of RSS queues |
| * used by default by multiqueue devices. |
| */ |
| int netif_get_num_default_rss_queues(void) |
| { |
| return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); |
| } |
| EXPORT_SYMBOL(netif_get_num_default_rss_queues); |
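| |
| /* Illustrative sketch: a multiqueue driver capping its default queue |
| * count at probe time (hw->max_queues and priv are hypothetical): |
| * |
| *	nr = min_t(int, hw->max_queues, |
| *		   netif_get_num_default_rss_queues()); |
| *	netdev = alloc_etherdev_mq(sizeof(*priv), nr); |
| */ |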
| |
| static inline void __netif_reschedule(struct Qdisc *q) |
| { |
| struct softnet_data *sd; |
| unsigned long flags; |
| |
| local_irq_save(flags); |
| sd = this_cpu_ptr(&softnet_data); |
| q->next_sched = NULL; |
| *sd->output_queue_tailp = q; |
| sd->output_queue_tailp = &q->next_sched; |
| raise_softirq_irqoff(NET_TX_SOFTIRQ); |
| local_irq_restore(flags); |
| } |
| |
| void __netif_schedule(struct Qdisc *q) |
| { |
| if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) |
| __netif_reschedule(q); |
| } |
| EXPORT_SYMBOL(__netif_schedule); |
| |
| struct dev_kfree_skb_cb { |
| enum skb_free_reason reason; |
| }; |
| |
| static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) |
| { |
| return (struct dev_kfree_skb_cb *)skb->cb; |
| } |
| |
| void netif_schedule_queue(struct netdev_queue *txq) |
| { |
| rcu_read_lock(); |
| if (!(txq->state & QUEUE_STATE_ANY_XOFF)) { |
| struct Qdisc *q = rcu_dereference(txq->qdisc); |
| |
| __netif_schedule(q); |
| } |
| rcu_read_unlock(); |
| } |
| EXPORT_SYMBOL(netif_schedule_queue); |
| |
| /** |
| * netif_wake_subqueue - allow sending packets on subqueue |
| * @dev: network device |
| * @queue_index: sub queue index |
| * |
| * Resume an individual transmit queue of a device with multiple transmit queues. |
| */ |
| void netif_wake_subqueue(struct net_device *dev, u16 queue_index) |
| { |
| struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
| |
| if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) { |
| struct Qdisc *q; |
| |
| rcu_read_lock(); |
| q = rcu_dereference(txq->qdisc); |
| __netif_schedule(q); |
| rcu_read_unlock(); |
| } |
| } |
| EXPORT_SYMBOL(netif_wake_subqueue); |
| |
| void netif_tx_wake_queue(struct netdev_queue *dev_queue) |
| { |
| if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { |
| struct Qdisc *q; |
| |
| rcu_read_lock(); |
| q = rcu_dereference(dev_queue->qdisc); |
| __netif_schedule(q); |
| rcu_read_unlock(); |
| } |
| } |
| EXPORT_SYMBOL(netif_tx_wake_queue); |
| |
| void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) |
| { |
| unsigned long flags; |
| |
| if (likely(atomic_read(&skb->users) == 1)) { |
| smp_rmb(); |
| atomic_set(&skb->users, 0); |
| } else if (likely(!atomic_dec_and_test(&skb->users))) { |
| return; |
| } |
| get_kfree_skb_cb(skb)->reason = reason; |
| local_irq_save(flags); |
| skb->next = __this_cpu_read(softnet_data.completion_queue); |
| __this_cpu_write(softnet_data.completion_queue, skb); |
| raise_softirq_irqoff(NET_TX_SOFTIRQ); |
| local_irq_restore(flags); |
| } |
| EXPORT_SYMBOL(__dev_kfree_skb_irq); |
| |
| void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) |
| { |
| if (in_irq() || irqs_disabled()) |
| __dev_kfree_skb_irq(skb, reason); |
| else |
| dev_kfree_skb(skb); |
| } |
| EXPORT_SYMBOL(__dev_kfree_skb_any); |
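| |
| /* Illustrative sketch: drivers normally reach the helpers above through |
| * the dev_kfree_skb_any()/dev_consume_skb_any() wrappers, which are |
| * safe in any context, e.g. a tx-completion hard irq (the foo_* names |
| * are hypothetical): |
| * |
| *	static irqreturn_t foo_tx_irq(int irq, void *data) |
| *	{ |
| *		struct foo_priv *priv = data; |
| *		struct sk_buff *skb; |
| * |
| *		while ((skb = foo_pop_completed(priv))) |
| *			dev_consume_skb_any(skb);	(sent, not dropped) |
| *		return IRQ_HANDLED; |
| *	} |
| */ |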
| |
| |
| /** |
| * netif_device_detach - mark device as removed |
| * @dev: network device |
| * |
| * Mark the device as removed from the system and therefore no longer available. |
| */ |
| void netif_device_detach(struct net_device *dev) |
| { |
| if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && |
| netif_running(dev)) { |
| netif_tx_stop_all_queues(dev); |
| } |
| } |
| EXPORT_SYMBOL(netif_device_detach); |
| |
| /** |
| * netif_device_attach - mark device as attached |
| * @dev: network device |
| * |
| * Mark the device as attached to the system and restart it if needed. |
| */ |
| void netif_device_attach(struct net_device *dev) |
| { |
| if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && |
| netif_running(dev)) { |
| netif_tx_wake_all_queues(dev); |
| __netdev_watchdog_up(dev); |
| } |
| } |
| EXPORT_SYMBOL(netif_device_attach); |
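| |
| /* Illustrative sketch: the usual pairing of the two helpers above in a |
| * driver's power-management callbacks (all foo_* names hypothetical): |
| * |
| *	static int foo_suspend(struct device *d) |
| *	{ |
| *		netif_device_detach(priv->netdev);	(stops tx queues) |
| *		foo_hw_power_down(priv); |
| *		return 0; |
| *	} |
| * |
| *	static int foo_resume(struct device *d) |
| *	{ |
| *		foo_hw_power_up(priv); |
| *		netif_device_attach(priv->netdev);	(queues + watchdog) |
| *		return 0; |
| *	} |
| */ |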
| |
| /* |
| * Returns a Tx hash based on the given packet descriptor and the number |
| * of Tx queues to be used as a distribution range. |
| */ |
| u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, |
| unsigned int num_tx_queues) |
| { |
| u32 hash; |
| u16 qoffset = 0; |
| u16 qcount = num_tx_queues; |
| |
| if (skb_rx_queue_recorded(skb)) { |
| hash = skb_get_rx_queue(skb); |
| while (unlikely(hash >= num_tx_queues)) |
| hash -= num_tx_queues; |
| return hash; |
| } |
| |
| if (dev->num_tc) { |
| u8 tc = netdev_get_prio_tc_map(dev, skb->priority); |
| qoffset = dev->tc_to_txq[tc].offset; |
| qcount = dev->tc_to_txq[tc].count; |
| } |
| |
| return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; |
| } |
| EXPORT_SYMBOL(__skb_tx_hash); |
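| |
| /* For reference, reciprocal_scale(hash, qcount) computes |
| * ((u64)hash * qcount) >> 32, mapping a 32-bit hash uniformly onto |
| * [0, qcount). Worked example (illustrative numbers): with qcount = 4 |
| * and qoffset = 4 (TC1 of an 8-queue device), hash 0x80000000 scales |
| * to 2, so queue 6 is selected. |
| */ |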
| |
| static void skb_warn_bad_offload(const struct sk_buff *skb) |
| { |
| static const netdev_features_t null_features = 0; |
| struct net_device *dev = skb->dev; |
| const char *driver = ""; |
| |
| if (!net_ratelimit()) |
| return; |
| |
| if (dev && dev->dev.parent) |
| driver = dev_driver_string(dev->dev.parent); |
| |
| WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " |
| "gso_type=%d ip_summed=%d\n", |
| driver, dev ? &dev->features : &null_features, |
| skb->sk ? &skb->sk->sk_route_caps : &null_features, |
| skb->len, skb->data_len, skb_shinfo(skb)->gso_size, |
| skb_shinfo(skb)->gso_type, skb->ip_summed); |
| } |
| |
| /* |
| * Invalidate hardware checksum when packet is to be mangled, and |
| * complete checksum manually on outgoing path. |
| */ |
| int skb_checksum_help(struct sk_buff *skb) |
| { |
| __wsum csum; |
| int ret = 0, offset; |
| |
| if (skb->ip_summed == CHECKSUM_COMPLETE) |
| goto out_set_summed; |
| |
| if (unlikely(skb_shinfo(skb)->gso_size)) { |
| skb_warn_bad_offload(skb); |
| return -EINVAL; |
| } |
| |
| /* Before computing a checksum, we should make sure no frag could |
| * be modified by an external entity: the checksum could be wrong |
| * otherwise. |
| */ |
| if (skb_has_shared_frag(skb)) { |
| ret = __skb_linearize(skb); |
| if (ret) |
| goto out; |
| } |
| |
| offset = skb_checksum_start_offset(skb); |
| BUG_ON(offset >= skb_headlen(skb)); |
| csum = skb_checksum(skb, offset, skb->len - offset, 0); |
| |
| offset += skb->csum_offset; |
| BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); |
| |
| if (skb_cloned(skb) && |
| !skb_clone_writable(skb, offset + sizeof(__sum16))) { |
| ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
| if (ret) |
| goto out; |
| } |
| |
| *(__sum16 *)(skb->data + offset) = csum_fold(csum); |
| out_set_summed: |
| skb->ip_summed = CHECKSUM_NONE; |
| out: |
| return ret; |
| } |
| EXPORT_SYMBOL(skb_checksum_help); |
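| |
| /* Illustrative sketch: a driver without hardware checksum support for |
| * a given packet falls back to the helper above before handing the |
| * frame to its DMA engine (foo_start_xmit is hypothetical): |
| * |
| *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, |
| *					  struct net_device *dev) |
| *	{ |
| *		if (skb->ip_summed == CHECKSUM_PARTIAL && |
| *		    skb_checksum_help(skb)) { |
| *			dev_kfree_skb_any(skb); |
| *			return NETDEV_TX_OK; |
| *		} |
| *		... map the buffer and ring the doorbell ... |
| *	} |
| */ |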
| |
| __be16 skb_network_protocol(struct sk_buff *skb, int *depth) |
| { |
| __be16 type = skb->protocol; |
| |
| /* Tunnel GSO handlers can set the protocol to Ethernet. */ |
| if (type == htons(ETH_P_TEB)) { |
| struct ethhdr *eth; |
| |
| if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) |
| return 0; |
| |
| eth = (struct ethhdr *)skb_mac_header(skb); |
| type = eth->h_proto; |
| } |
| |
| return __vlan_get_protocol(skb, type, depth); |
| } |
| |
| /** |
| * skb_mac_gso_segment - mac layer segmentation handler. |
| * @skb: buffer to segment |
| * @features: features for the output path (see dev->features) |
| */ |
| struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, |
| netdev_features_t features) |
| { |
| struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); |
| struct packet_offload *ptype; |
| int vlan_depth = skb->mac_len; |
| __be16 type = skb_network_protocol(skb, &vlan_depth); |
| |
| if (unlikely(!type)) |
| return ERR_PTR(-EINVAL); |
| |
| __skb_pull(skb, vlan_depth); |
| |
| rcu_read_lock(); |
| list_for_each_entry_rcu(ptype, &offload_base, list) { |
| if (ptype->type == type && ptype->callbacks.gso_segment) { |
| segs = ptype->callbacks.gso_segment(skb, features); |
| break; |
| } |
| } |
| rcu_read_unlock(); |
| |
| __skb_push(skb, skb->data - skb_mac_header(skb)); |
| |
| return segs; |
| } |
| EXPORT_SYMBOL(skb_mac_gso_segment); |
| |
| |
| /* openvswitch calls this on the rx path, so we need a different check. |
| */ |
| static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) |
| { |
| if (tx_path) |
| return skb->ip_summed != CHECKSUM_PARTIAL; |
| else |
| return skb->ip_summed == CHECKSUM_NONE; |
| } |
| |
| /** |
| * __skb_gso_segment - Perform segmentation on skb. |
| * @skb: buffer to segment |
| * @features: features for the output path (see dev->features) |
| * @tx_path: whether it is called in TX path |
| * |
| * This function segments the given skb and returns a list of segments. |
| * |
| * It may return NULL if the skb requires no segmentation. This is |
| * only possible when GSO is used for verifying header integrity. |
| */ |
| struct sk_buff *__skb_gso_segment(struct sk_buff *skb, |
| netdev_features_t features, bool tx_path) |
| { |
| if (unlikely(skb_needs_check(skb, tx_path))) { |
| int err; |
| |
| skb_warn_bad_offload(skb); |
| |
| err = skb_cow_head(skb, 0); |
| if (err < 0) |
| return ERR_PTR(err); |
| } |
| |
| SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); |
| SKB_GSO_CB(skb)->encap_level = 0; |
| |
| skb_reset_mac_header(skb); |
| skb_reset_mac_len(skb); |
| |
| return skb_mac_gso_segment(skb, features); |
| } |
| EXPORT_SYMBOL(__skb_gso_segment); |
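| |
| /* Illustrative sketch: the classic software fallback built on the |
| * helper above - segment once, then walk the resulting list (the |
| * actual transmit step is elided): |
| * |
| *	struct sk_buff *segs; |
| * |
| *	segs = skb_gso_segment(skb, features); |
| *	if (IS_ERR(segs)) |
| *		goto drop; |
| *	if (segs) {	(NULL means no segmentation was needed) |
| *		consume_skb(skb); |
| *		skb = segs; |
| *	} |
| *	(then transmit each buffer on the skb->next chain in turn) |
| */ |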
| |
| /* Take action when hardware reception checksum errors are detected. */ |
| #ifdef CONFIG_BUG |
| void netdev_rx_csum_fault(struct net_device *dev) |
| { |
| if (net_ratelimit()) { |
| pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); |
| dump_stack(); |
| } |
| } |
| EXPORT_SYMBOL(netdev_rx_csum_fault); |
| #endif |
| |
| /* Actually, we should eliminate this check once we know that either: |
| * 1. an IOMMU is present and allows mapping all of memory, or |
| * 2. no high memory really exists on this machine. |
| */ |
| |
| static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) |
| { |
| #ifdef CONFIG_HIGHMEM |
| int i; |
| if (!(dev->features & NETIF_F_HIGHDMA)) { |
| for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| if (PageHighMem(skb_frag_page(frag))) |
| return 1; |
| } |
| } |
| |
| if (PCI_DMA_BUS_IS_PHYS) { |
| struct device *pdev = dev->dev.parent; |
| |
| if (!pdev) |
| return 0; |
| for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| dma_addr_t addr = page_to_phys(skb_frag_page(frag)); |
| if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) |
| return 1; |
| } |
| } |
| #endif |
| return 0; |
| } |
| |
| /* For an MPLS offload request, verify that we are testing hardware |
| * MPLS features instead of the netdev's standard features. |
| */ |
| #if IS_ENABLED(CONFIG_NET_MPLS_GSO) |
| static netdev_features_t net_mpls_features(struct sk_buff *skb, |
| netdev_features_t features, |
| __be16 type) |
| { |
| if (eth_p_mpls(type)) |
| features &= skb->dev->mpls_features; |
| |
| return features; |
| } |
| #else |
| static netdev_features_t net_mpls_features(struct sk_buff *skb, |
| netdev_features_t features, |
| __be16 type) |
| { |
| return features; |
| } |
| #endif |
| |
| static netdev_features_t harmonize_features(struct sk_buff *skb, |
| netdev_features_t features) |
| { |
| int tmp; |
| __be16 type; |
| |
| type = skb_network_protocol(skb, &tmp); |
| features = net_mpls_features(skb, features, type); |
| |
| if (skb->ip_summed != CHECKSUM_NONE && |
| !can_checksum_protocol(features, type)) { |
| features &= ~NETIF_F_ALL_CSUM; |
| } else if (illegal_highdma(skb->dev, skb)) { |
| features &= ~NETIF_F_SG; |
| } |
| |
| return features; |
| } |
| |
| netdev_features_t passthru_features_check(struct sk_buff *skb, |
| struct net_device *dev, |
| netdev_features_t features) |
| { |
| return features; |
| } |
| EXPORT_SYMBOL(passthru_features_check); |
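| |
| /* Illustrative sketch: a software device whose xmit path can take any |
| * frame opts out of the default VLAN-depth checks by wiring the helper |
| * above into its ops (the ops struct name is hypothetical): |
| * |
| *	static const struct net_device_ops foo_netdev_ops = { |
| *		.ndo_start_xmit		= foo_start_xmit, |
| *		.ndo_features_check	= passthru_features_check, |
| *	}; |
| */ |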
| |
| static netdev_features_t dflt_features_check(const struct sk_buff *skb, |
| struct net_device *dev, |
| netdev_features_t features) |
| { |
| return vlan_features_check(skb, features); |
| } |
| |
| netdev_features_t netif_skb_features(struct sk_buff *skb) |
| { |
| struct net_device *dev = skb->dev; |
| netdev_features_t features = dev->features; |
| u16 gso_segs = skb_shinfo(skb)->gso_segs; |
| |
| if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs) |
| features &= ~NETIF_F_GSO_MASK; |
| |
| /* For an encapsulation offload request, verify that we are |
| * testing hardware encapsulation features instead of the |
| * netdev's standard features |
| */ |
| if (skb->encapsulation) |
| features &= dev->hw_enc_features; |
| |
| if (skb_vlan_tagged(skb)) |
| features = netdev_intersect_features(features, |
| dev->vlan_features | |
| NETIF_F_HW_VLAN_CTAG_TX | |
| NETIF_F_HW_VLAN_STAG_TX); |
| |
| if (dev->netdev_ops->ndo_features_check) |
| features &= dev->netdev_ops->ndo_features_check(skb, dev, |
| features); |
| else |
| features &= dflt_features_check(skb, dev, features); |
| |
| return harmonize_features(skb, features); |
| } |
| EXPORT_SYMBOL(netif_skb_features); |
| |
| static int xmit_one(struct sk_buff *skb, struct net_device *dev, |
| struct netdev_queue *txq, bool more) |
| { |
| unsigned int len; |
| int rc; |
| |
| if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all)) |
| dev_queue_xmit_nit(skb, dev); |
| |
| len = skb->len; |
| trace_net_dev_start_xmit(skb, dev); |
| rc = netdev_start_xmit(skb, dev, txq, more); |
| trace_net_dev_xmit(skb, rc, dev, len); |
| |
| return rc; |
| } |
| |
| struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, |
| struct netdev_queue *txq, int *ret) |
| { |
| struct sk_buff *skb = first; |
| int rc = NETDEV_TX_OK; |
| |
| while (skb) { |
| struct sk_buff *next = skb->next; |
| |
| skb->next = NULL; |
| rc = xmit_one(skb, dev, txq, next != NULL); |
| if (unlikely(!dev_xmit_complete(rc))) { |
| skb->next = next; |
| goto out; |
| } |
| |
| skb = next; |
| if (netif_xmit_stopped(txq) && skb) { |
| rc = NETDEV_TX_BUSY; |
| break; |
| } |
| } |
| |
| out: |
| *ret = rc; |
| return skb; |
| } |
| |
| static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, |
| netdev_features_t features) |
| { |
| if (skb_vlan_tag_present(skb) && |
| !vlan_hw_offload_capable(features, skb->vlan_proto)) |
| skb = __vlan_hwaccel_push_inside(skb); |
| return skb; |
| } |
| |
| static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev) |
| { |
| netdev_features_t features; |
| |
| if (skb->next) |
| return skb; |
| |
| features = netif_skb_features(skb); |
| skb = validate_xmit_vlan(skb, features); |
| if (unlikely(!skb)) |
| goto out_null; |
| |
| if (netif_needs_gso(skb, features)) { |
| struct sk_buff *segs; |
| |
| segs = skb_gso_segment(skb, features); |
| if (IS_ERR(segs)) { |
| goto out_kfree_skb; |
| } else if (segs) { |
| consume_skb(skb); |
| skb = segs; |
| } |
| } else { |
| if (skb_needs_linearize(skb, features) && |
| __skb_linearize(skb)) |
| goto out_kfree_skb; |
| |
| /* If packet is not checksummed and device does not |
| * support checksumming for this protocol, complete |
| * checksumming here. |
| */ |
| if (skb->ip_summed == CHECKSUM_PARTIAL) { |
| if (skb->encapsulation) |
| skb_set_inner_transport_header(skb, |
| skb_checksum_start_offset(skb)); |
| else |
| skb_set_transport_header(skb, |
| skb_checksum_start_offset(skb)); |
| if (!(features & NETIF_F_ALL_CSUM) && |
| skb_checksum_help(skb)) |
| goto out_kfree_skb; |
| } |
| } |
| |
| return skb; |
| |
| out_kfree_skb: |
| kfree_skb(skb); |
| out_null: |
| return NULL; |
| } |
| |
| struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev) |
| { |
| struct sk_buff *next, *head = NULL, *tail; |
| |
| for (; skb != NULL; skb = next) { |
| next = skb->next; |
| skb->next = NULL; |
| |
| /* In case the skb won't be segmented, point it at itself */ |
| skb->prev = skb; |
| |
| skb = validate_xmit_skb(skb, dev); |
| if (!skb) |
| continue; |
| |
| if (!head) |
| head = skb; |
| else |
| tail->next = skb; |
| /* If the skb was segmented, skb->prev points to |
| * the last segment. If not, it still points to skb. |
| */ |
| tail = skb->prev; |
| } |
| return head; |
| } |
| |
| static void qdisc_pkt_len_init(struct sk_buff *skb) |
| { |
| const struct skb_shared_info *shinfo = skb_shinfo(skb); |
| |
| qdisc_skb_cb(skb)->pkt_len = skb->len; |
| |
| /* To get a more precise estimate of the bytes sent on the wire, |
| * we add the header size of every segment to pkt_len. |
| */ |
| if (shinfo->gso_size) { |
| unsigned int hdr_len; |
| u16 gso_segs = shinfo->gso_segs; |
| |
| /* mac layer + network layer */ |
| hdr_len = skb_transport_header(skb) - skb_mac_header(skb); |
| |
| /* + transport layer */ |
| if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) |
| hdr_len += tcp_hdrlen(skb); |
| else |
| hdr_len += sizeof(struct udphdr); |
| |
| if (shinfo->gso_type & SKB_GSO_DODGY) |
| gso_segs = DIV_ROUND_UP(skb->len - hdr_len, |
| shinfo->gso_size); |
| |
| qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; |
| } |
| } |
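| |
| /* Worked example (illustrative numbers): a TCPv4 skb with |
| * skb->len = 30066, gso_size = 1448 and hdr_len = 66 (14 MAC + 20 IP + |
| * 32 TCP) carries 30000 payload bytes, so gso_segs = 21. pkt_len then |
| * becomes 30066 + 20 * 66 = 31386, which matches the 21 segments that |
| * actually hit the wire. |
| */ |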
| |
| static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, |
| struct net_device *dev, |
| struct netdev_queue *txq) |
| { |
| spinlock_t *root_lock = qdisc_lock(q); |
| bool contended; |
| int rc; |
| |
| qdisc_pkt_len_init(skb); |
| qdisc_calculate_pkt_len(skb, q); |
| /* |
| * Heuristic to force contended enqueues to serialize on a |
| * separate lock before trying to get the qdisc main lock. |
| * This permits the __QDISC_STATE_RUNNING owner to take the lock more |
| * often and dequeue packets faster. |
| */ |
| contended = qdisc_is_running(q); |
| if (unlikely(contended)) |
| spin_lock(&q->busylock); |
| |
| spin_lock(root_lock); |
| if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { |
| kfree_skb(skb); |
| rc = NET_XMIT_DROP; |
| } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && |
| qdisc_run_begin(q)) { |
| /* |
| * This is a work-conserving queue; there are no old skbs |
| * waiting to be sent out; and the qdisc is not running - |
| * xmit the skb directly. |
| */ |
| |
| qdisc_bstats_update(q, skb); |
| |
| if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { |
| if (unlikely(contended)) { |
| spin_unlock(&q->busylock); |
| contended = false; |
| } |
| __qdisc_run(q); |
| } else |
| qdisc_run_end(q); |
| |
| rc = NET_XMIT_SUCCESS; |
| } else { |
| rc = q->enqueue(skb, q) & NET_XMIT_MASK; |
| if (qdisc_run_begin(q)) { |
| if (unlikely(contended)) { |
| spin_unlock(&q->busylock); |
| contended = false; |
| } |
| __qdisc_run(q); |
| } |
| } |
| spin_unlock(root_lock); |
| if (unlikely(contended)) |
| spin_unlock(&q->busylock); |
| return rc; |
| } |
| |
| #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) |
| static void skb_update_prio(struct sk_buff *skb) |
| { |
| struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); |
| |
| if (!skb->priority && skb->sk && map) { |
| unsigned int prioidx = skb->sk->sk_cgrp_prioidx; |
| |
| if (prioidx < map->priomap_len) |
| skb->priority = map->priomap[prioidx]; |
| } |
| } |
| #else |
| #define skb_update_prio(skb) |
| #endif |
| |
| DEFINE_PER_CPU(int, xmit_recursion); |
| EXPORT_SYMBOL(xmit_recursion); |
| |
| #define RECURSION_LIMIT 10 |
| |
| /** |
| * dev_loopback_xmit - loop back @skb |
| * @skb: buffer to transmit |
| */ |
| int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb) |
| { |
| skb_reset_mac_header(skb); |
| __skb_pull(skb, skb_network_offset(skb)); |
| skb->pkt_type = PACKET_LOOPBACK; |
| skb->ip_summed = CHECKSUM_UNNECESSARY; |
| WARN_ON(!skb_dst(skb)); |
| skb_dst_force(skb); |
| netif_rx_ni(skb); |
| return 0; |
| } |
| EXPORT_SYMBOL(dev_loopback_xmit); |
| |
| static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) |
| { |
| #ifdef CONFIG_XPS |
| struct xps_dev_maps *dev_maps; |
| struct xps_map *map; |
| int queue_index = -1; |
| |
| rcu_read_lock(); |
| dev_maps = rcu_dereference(dev->xps_maps); |
| if (dev_maps) { |
| map = rcu_dereference( |
| dev_maps->cpu_map[skb->sender_cpu - 1]); |
| if (map) { |
| if (map->len == 1) |
| queue_index = map->queues[0]; |
| else |
| queue_index = map->queues[reciprocal_scale(skb_get_hash(skb), |
| map->len)]; |
| if (unlikely(queue_index >= dev->real_num_tx_queues)) |
| queue_index = -1; |
| } |
| } |
| rcu_read_unlock(); |
| |
| return queue_index; |
| #else |
| return -1; |
| #endif |
| } |
| |
| static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) |
| { |
| struct sock *sk = skb->sk; |
| int queue_index = sk_tx_queue_get(sk); |
| |
| if (queue_index < 0 || skb->ooo_okay || |
| queue_index >= dev->real_num_tx_queues) { |
| int new_index = get_xps_queue(dev, skb); |
| if (new_index < 0) |
| new_index = skb_tx_hash(dev, skb); |
| |
| if (queue_index != new_index && sk && |
| rcu_access_pointer(sk->sk_dst_cache)) |
| sk_tx_queue_set(sk, new_index); |
| |
| queue_index = new_index; |
| } |
| |
| return queue_index; |
| } |
| |
| struct netdev_queue *netdev_pick_tx(struct net_device *dev, |
| struct sk_buff *skb, |
| void *accel_priv) |
| { |
| int queue_index = 0; |
| |
| #ifdef CONFIG_XPS |
| if (skb->sender_cpu == 0) |
| skb->sender_cpu = raw_smp_processor_id() + 1; |
| #endif |
| |
| if (dev->real_num_tx_queues != 1) { |
| const struct net_device_ops *ops = dev->netdev_ops; |
| if (ops->ndo_select_queue) |
| queue_index = ops->ndo_select_queue(dev, skb, accel_priv, |
| __netdev_pick_tx); |
| else |
| queue_index = __netdev_pick_tx(dev, skb); |
| |
| if (!accel_priv) |
| queue_index = netdev_cap_txqueue(dev, queue_index); |
| } |
| |
| skb_set_queue_mapping(skb, queue_index); |
| return netdev_get_tx_queue(dev, queue_index); |
| } |
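| |
| /* Illustrative sketch: a driver steering some traffic itself while |
| * deferring everything else to the standard path via the fallback |
| * argument (foo_is_mgmt_frame is hypothetical): |
| * |
| *	static u16 foo_select_queue(struct net_device *dev, |
| *				    struct sk_buff *skb, void *accel_priv, |
| *				    select_queue_fallback_t fallback) |
| *	{ |
| *		if (foo_is_mgmt_frame(skb)) |
| *			return 0;		(dedicated queue) |
| *		return fallback(dev, skb);	(i.e. __netdev_pick_tx) |
| *	} |
| */ |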
| |
| /** |
| * __dev_queue_xmit - transmit a buffer |
| * @skb: buffer to transmit |
| * @accel_priv: private data used for L2 forwarding offload |
| * |
| * Queue a buffer for transmission to a network device. The caller must |
| * have set the device and priority and built the buffer before calling |
| * this function. The function can be called from an interrupt. |
| * |
| * A negative errno code is returned on a failure. A success does not |
| * guarantee the frame will be transmitted as it may be dropped due |
| * to congestion or traffic shaping. |
| * |
| * ----------------------------------------------------------------------------------- |
| * I notice this method can also return errors from the queue disciplines, |
| * including NET_XMIT_DROP, which is a positive value. So, errors can also |
| * be positive. |
| * |
| * Regardless of the return value, the skb is consumed, so it is currently |
| * difficult to retry a send to this method. (You can bump the ref count |
| * before sending to hold a reference for retry if you are careful.) |
| * |
| * When calling this method, interrupts MUST be enabled. This is because |
| * the BH enable code must have IRQs enabled so that it will not deadlock. |
| * --BLG |
| */ |
| static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) |
| { |
| struct net_device *dev = skb->dev; |
| struct netdev_queue *txq; |
| struct Qdisc *q; |
| int rc = -ENOMEM; |
| |
| skb_reset_mac_header(skb); |
| |
| if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) |
| __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED); |
| |
| /* Disable soft irqs for various locks below. Also |
| * stops preemption for RCU. |
| */ |
| rcu_read_lock_bh(); |
| |
| skb_update_prio(skb); |
| |
| /* If the device/qdisc doesn't need skb->dst, release it right now |
| * while it's hot in this CPU's cache. |
| */ |
| if (dev->priv_flags & IFF_XMIT_DST_RELEASE) |
| skb_dst_drop(skb); |
| else |
| skb_dst_force(skb); |
| |
| #ifdef CONFIG_NET_SWITCHDEV |
| /* Don't forward if the offload device has already forwarded it */ |
| if (skb->offload_fwd_mark && |
| skb->offload_fwd_mark == dev->offload_fwd_mark) { |
| consume_skb(skb); |
| rc = NET_XMIT_SUCCESS; |
| goto out; |
| } |
| #endif |
| |
| txq = netdev_pick_tx(dev, skb, accel_priv); |
| q = rcu_dereference_bh(txq->qdisc); |
| |
| #ifdef CONFIG_NET_CLS_ACT |
| skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); |
| #endif |
| trace_net_dev_queue(skb); |
| if (q->enqueue) { |
| rc = __dev_xmit_skb(skb, q, dev, txq); |
| goto out; |
| } |
| |
| /* The device has no queue. This is the common case for software |
| devices: loopback, all sorts of tunnels... |
| |
| Really, it is unlikely that netif_tx_lock protection is necessary |
| here. (E.g. loopback and IP tunnels are clean, ignoring the |
| statistics counters.) |
| However, it is possible that they rely on the protection |
| we provide here. |
| |
| Check this and take the lock. It is not prone to deadlocks. |
| Alternatively, shoot the noqueue qdisc; that is even simpler 8) |
| */ |
| if (dev->flags & IFF_UP) { |
| int cpu = smp_processor_id(); /* ok because BHs are off */ |
| |
| if (txq->xmit_lock_owner != cpu) { |
| |
| if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) |
| goto recursion_alert; |
| |
| skb = validate_xmit_skb(skb, dev); |
| if (!skb) |
| goto drop; |
| |
| HARD_TX_LOCK(dev, txq, cpu); |
| |
| if (!netif_xmit_stopped(txq)) { |
| __this_cpu_inc(xmit_recursion); |
| skb = dev_hard_start_xmit(skb, dev, txq, &rc); |
| __this_cpu_dec(xmit_recursion); |
| if (dev_xmit_complete(rc)) { |
| HARD_TX_UNLOCK(dev, txq); |
| goto out; |
| } |
| } |
| HARD_TX_UNLOCK(dev, txq); |
| net_crit_ratelimited("Virtual device %s asks to queue packet!\n", |
| dev->name); |
| } else { |
| /* Recursion is detected! It is possible, |
| * unfortunately |
| */ |
| recursion_alert: |
| net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", |
| dev->name); |
| } |
| } |
| |
| rc = -ENETDOWN; |
| drop: |
| rcu_read_unlock_bh(); |
| |
| atomic_long_inc(&dev->tx_dropped); |
| kfree_skb_list(skb); |
| return rc; |
| out: |
| rcu_read_unlock_bh(); |
| return rc; |
| } |
| |
| int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb) |
| { |
| return __dev_queue_xmit(skb, NULL); |
| } |
| EXPORT_SYMBOL(dev_queue_xmit_sk); |
| |
| int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) |
| { |
| return __dev_queue_xmit(skb, accel_priv); |
| } |
| EXPORT_SYMBOL(dev_queue_xmit_accel); |
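| |
| /* Illustrative sketch: what a typical protocol-side sender looks like; |
| * build the frame, set dev and protocol, then hand it off (the header |
| * and payload setup is elided): |
| * |
| *	skb->dev = dev; |
| *	skb->protocol = htons(ETH_P_IP); |
| *	... fill in headers and data ... |
| *	err = dev_queue_xmit(skb); |
| *	(whatever err says, the skb is consumed and must not be reused) |
| */ |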
| |
| |
| /*======================================================================= |
| Receiver routines |
| =======================================================================*/ |
| |
| int netdev_max_backlog __read_mostly = 1000; |
| EXPORT_SYMBOL(netdev_max_backlog); |
| |
| int netdev_tstamp_prequeue __read_mostly = 1; |
| int netdev_budget __read_mostly = 300; |
| int weight_p __read_mostly = 64; /* old backlog weight */ |
| |
| /* Called with irq disabled */ |
| static inline void ____napi_schedule(struct softnet_data *sd, |
| struct napi_struct *napi) |
| { |
| list_add_tail(&napi->poll_list, &sd->poll_list); |
| __raise_softirq_irqoff(NET_RX_SOFTIRQ); |
| } |
| |
| #ifdef CONFIG_RPS |
| |
| /* One global table that all flow-based protocols share. */ |
| struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; |
| EXPORT_SYMBOL(rps_sock_flow_table); |
| u32 rps_cpu_mask __read_mostly; |
| EXPORT_SYMBOL(rps_cpu_mask); |
| |
| struct static_key rps_needed __read_mostly; |
| |
| static struct rps_dev_flow * |
| set_rps_cpu(struct net_device *dev, struct sk_buff *skb, |
| struct rps_dev_flow *rflow, u16 next_cpu) |
| { |
| if (next_cpu < nr_cpu_ids) { |
| #ifdef CONFIG_RFS_ACCEL |
| struct netdev_rx_queue *rxqueue; |
| struct rps_dev_flow_table *flow_table; |
| struct rps_dev_flow *old_rflow; |
| u32 flow_id; |
| u16 rxq_index; |
| int rc; |
| |
| /* Should we steer this flow to a different hardware queue? */ |
| if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || |
| !(dev->features & NETIF_F_NTUPLE)) |
| goto out; |
| rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); |
| if (rxq_index == skb_get_rx_queue(skb)) |
| goto out; |
| |
| rxqueue = dev->_rx + rxq_index; |
| flow_table = rcu_dereference(rxqueue->rps_flow_table); |
| if (!flow_table) |
| goto out; |
| flow_id = skb_get_hash(skb) & flow_table->mask; |
| rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, |
| rxq_index, flow_id); |
| if (rc < 0) |
| goto out; |
| old_rflow = rflow; |
| rflow = &flow_table->flows[flow_id]; |
| rflow->filter = rc; |
| if (old_rflow->filter == rflow->filter) |
| old_rflow->filter = RPS_NO_FILTER; |
| out: |
| #endif |
| rflow->last_qtail = |
| per_cpu(softnet_data, next_cpu).input_queue_head; |
| } |
| |
| rflow->cpu = next_cpu; |
| return rflow; |
| } |
| |
| /* |
| * get_rps_cpu is called from netif_receive_skb and returns the target |
| * CPU from the RPS map of the receiving queue for a given skb. |
| * rcu_read_lock must be held on entry. |
| */ |
| static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, |
| struct rps_dev_flow **rflowp) |
| { |
| const struct rps_sock_flow_table *sock_flow_table; |
| struct netdev_rx_queue *rxqueue = dev->_rx; |
| struct rps_dev_flow_table *flow_table; |
| struct rps_map *map; |
| int cpu = -1; |
| u32 tcpu; |
| u32 hash; |
| |
| if (skb_rx_queue_recorded(skb)) { |
| u16 index = skb_get_rx_queue(skb); |
| |
| if (unlikely(index >= dev->real_num_rx_queues)) { |
| WARN_ONCE(dev->real_num_rx_queues > 1, |
| "%s received packet on queue %u, but number " |
| "of RX queues is %u\n", |
| dev->name, index, dev->real_num_rx_queues); |
| goto done; |
| } |
| rxqueue += index; |
| } |
| |
| /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ |
| |
| flow_table = rcu_dereference(rxqueue->rps_flow_table); |
| map = rcu_dereference(rxqueue->rps_map); |
| if (!flow_table && !map) |
| goto done; |
| |
| skb_reset_network_header(skb); |
| hash = skb_get_hash(skb); |
| if (!hash) |
| goto done; |
| |
| sock_flow_table = rcu_dereference(rps_sock_flow_table); |
| if (flow_table && sock_flow_table) { |
| struct rps_dev_flow *rflow; |
| u32 next_cpu; |
| u32 ident; |
| |
| /* First check the global flow table for a match */ |
| ident = sock_flow_table->ents[hash & sock_flow_table->mask]; |
| if ((ident ^ hash) & ~rps_cpu_mask) |
| goto try_rp
|